Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/ahci_mvebu.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 2
-rw-r--r--  drivers/atm/he.c | 2
-rw-r--r--  drivers/atm/idt77105.c | 6
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/base/cacheinfo.c | 2
-rw-r--r--  drivers/base/init.c | 2
-rw-r--r--  drivers/block/Kconfig | 1
-rw-r--r--  drivers/block/nvme-core.c | 10
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/bus/mvebu-mbus.c | 109
-rw-r--r--  drivers/dma/hsu/hsu.c | 5
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-hix5hd2.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 1
-rw-r--r--  drivers/iio/adc/twl6030-gpadc.c | 2
-rw-r--r--  drivers/iio/imu/adis16400.h | 2
-rw-r--r--  drivers/iio/imu/adis16400_buffer.c | 26
-rw-r--r--  drivers/iio/imu/adis16400_core.c | 41
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 38
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 43
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 7
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 297
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 541
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 17
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 2
-rw-r--r--  drivers/input/mouse/alps.c | 5
-rw-r--r--  drivers/input/mouse/elantech.c | 8
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/iommu/amd_iommu.c | 1
-rw-r--r--  drivers/iommu/intel-iommu.c | 31
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c | 4
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 9
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 15
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 32
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 9
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 7
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 67
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 19
-rw-r--r--  drivers/net/ethernet/brocade/bna/Makefile | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cee.c | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cs.h | 14
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs.h | 11
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs_cna.h | 16
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h | 10
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 71
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.h | 23
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 101
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_msgq.c | 10
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi.h | 84
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_cna.h | 30
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_enet.h | 176
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna.h | 199
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_enet.c | 101
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 70
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 673
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 19
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 119
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 67
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h | 62
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna_fwimg.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 39
-rw-r--r--  drivers/net/ethernet/cavium/Makefile | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/Makefile | 16
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 796
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 107
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h | 535
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 198
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.h | 33
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h | 51
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1216
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 3668
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 673
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_image.h | 57
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_config.h | 424
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 723
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 1309
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 649
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 989
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 426
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 319
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h | 237
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 199
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h | 75
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h | 224
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 189
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 227
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 766
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c | 178
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.h | 140
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 125
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 328
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 63
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.c | 8
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 87
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 7
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 38
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 66
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 11
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 27
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 18
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 8
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 63
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 154
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 87
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 272
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 810
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 134
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/profile.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 173
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 137
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 108
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 444
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 211
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 265
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.h | 41
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 31
-rw-r--r--  drivers/net/ethernet/renesas/Makefile | 4
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 832
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 1824
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c | 357
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 253
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 75
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 24
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 1
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 12
-rw-r--r--  drivers/net/fddi/skfp/srf.c | 2
-rw-r--r--  drivers/net/hamradio/bpqether.c | 1
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 7
-rw-r--r--  drivers/net/phy/davicom.c | 13
-rw-r--r--  drivers/net/phy/dp83867.c | 6
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 8
-rw-r--r--  drivers/net/phy/micrel.c | 53
-rw-r--r--  drivers/net/ppp/pptp.c | 2
-rw-r--r--  drivers/net/wan/cosa.c | 2
-rw-r--r--  drivers/net/wan/dscc4.c | 9
-rw-r--r--  drivers/net/wan/lapbether.c | 1
-rw-r--r--  drivers/net/wireless/adm8211.c | 8
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 24
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 20
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 12
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 4
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 7
-rw-r--r--  drivers/net/wireless/cw1200/main.c | 16
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 6
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 32
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 26
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/init.c | 10
-rw-r--r--  drivers/net/wireless/mwl8k.c | 9
-rw-r--r--  drivers/net/wireless/p54/main.c | 12
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 7
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 16
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 9
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 9
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 22
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 26
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 8
-rw-r--r--  drivers/net/xen-netback/netback.c | 19
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/nfc/Kconfig | 2
-rw-r--r--  drivers/nfc/Makefile | 4
-rw-r--r--  drivers/nfc/microread/i2c.c | 3
-rw-r--r--  drivers/nfc/nfcmrvl/Kconfig | 11
-rw-r--r--  drivers/nfc/nfcmrvl/Makefile | 3
-rw-r--r--  drivers/nfc/nfcmrvl/main.c | 134
-rw-r--r--  drivers/nfc/nfcmrvl/nfcmrvl.h | 60
-rw-r--r--  drivers/nfc/nfcmrvl/uart.c | 225
-rw-r--r--  drivers/nfc/nfcmrvl/usb.c | 27
-rw-r--r--  drivers/nfc/nxp-nci/Makefile | 2
-rw-r--r--  drivers/nfc/nxp-nci/i2c.c | 52
-rw-r--r--  drivers/nfc/pn544/i2c.c | 43
-rw-r--r--  drivers/nfc/st-nci/Kconfig | 23
-rw-r--r--  drivers/nfc/st-nci/Makefile | 9
-rw-r--r--  drivers/nfc/st-nci/core.c | 179
-rw-r--r--  drivers/nfc/st-nci/i2c.c (renamed from drivers/nfc/st21nfcb/i2c.c) | 147
-rw-r--r--  drivers/nfc/st-nci/ndlc.c (renamed from drivers/nfc/st21nfcb/ndlc.c) | 23
-rw-r--r--  drivers/nfc/st-nci/ndlc.h (renamed from drivers/nfc/st21nfcb/ndlc.h) | 5
-rw-r--r--  drivers/nfc/st-nci/st-nci.h (renamed from drivers/nfc/st21nfcb/st21nfcb.h) | 30
-rw-r--r--  drivers/nfc/st-nci/st-nci_se.c (renamed from drivers/nfc/st21nfcb/st21nfcb_se.c) | 383
-rw-r--r--  drivers/nfc/st-nci/st-nci_se.h | 61
-rw-r--r--  drivers/nfc/st21nfcb/Kconfig | 22
-rw-r--r--  drivers/nfc/st21nfcb/Makefile | 9
-rw-r--r--  drivers/nfc/st21nfcb/st21nfcb.c | 143
-rw-r--r--  drivers/nfc/st21nfcb/st21nfcb_se.h | 61
-rw-r--r--  drivers/nfc/trf7970a.c | 23
-rw-r--r--  drivers/of/base.c | 8
-rw-r--r--  drivers/of/dynamic.c | 2
-rw-r--r--  drivers/pci/setup-bus.c | 9
-rw-r--r--  drivers/phy/Kconfig | 10
-rw-r--r--  drivers/phy/phy-core.c | 4
-rw-r--r--  drivers/phy/phy-omap-usb2.c | 1
-rw-r--r--  drivers/phy/phy-rcar-gen2.c | 4
-rw-r--r--  drivers/soc/mediatek/Kconfig | 1
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c | 54
-rw-r--r--  drivers/staging/ozwpan/ozhcd.c | 8
-rw-r--r--  drivers/staging/ozwpan/ozusbif.h | 4
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc1.c | 19
-rw-r--r--  drivers/staging/rtl8712/rtl8712_led.c | 144
-rw-r--r--  drivers/staging/rtl8712/rtl871x_cmd.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_mlme.c | 6
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_sta_mgt.c | 2
-rw-r--r--  drivers/staging/vt6655/device_main.c | 8
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 8
-rw-r--r--  drivers/tty/n_tty.c | 21
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 82
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 16
-rw-r--r--  drivers/tty/serial/imx.c | 8
-rw-r--r--  drivers/usb/dwc3/core.h | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 15
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c | 5
-rw-r--r--  drivers/usb/gadget/legacy/g_ffs.c | 4
-rw-r--r--  drivers/usb/gadget/udc/s3c2410_udc.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 57
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 14
-rw-r--r--  drivers/usb/phy/phy-ab8500-usb.c | 6
-rw-r--r--  drivers/usb/phy/phy-tahvo.c | 3
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 38
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 1
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 1
325 files changed, 24579 insertions(+), 4066 deletions(-)
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 23716dd8a7ec..5928d0746a27 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
writel((cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
hpriv->mmio + AHCI_WINDOW_CTRL(i));
- writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+ writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
writel(((cs->size - 1) & 0xffff0000),
hpriv->mmio + AHCI_WINDOW_SIZE(i));
}
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 80a80548ad0a..27245957eee3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 0237271e80fc..a8da3a50e374 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -117,7 +117,7 @@ static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = true;
-static bool sdh = 0;
+static bool sdh;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 909c95bd7be2..feb023d7eebd 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -306,14 +306,12 @@ static int idt77105_start(struct atm_dev *dev)
if (start_timer) {
start_timer = 0;
- init_timer(&stats_timer);
+ setup_timer(&stats_timer, idt77105_stats_timer_func, 0UL);
stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD;
- stats_timer.function = idt77105_stats_timer_func;
add_timer(&stats_timer);
- init_timer(&restart_timer);
+ setup_timer(&restart_timer, idt77105_restart_timer_func, 0UL);
restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD;
- restart_timer.function = idt77105_restart_timer_func;
add_timer(&restart_timer);
}
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 924f8e26789d..65e65903faa0 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2618,7 +2618,7 @@ static void ia_close(struct atm_vcc *vcc)
if (vcc->qos.txtp.traffic_class != ATM_NONE) {
iadev->close_pending++;
prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
- schedule_timeout(50);
+ schedule_timeout(msecs_to_jiffies(500));
finish_wait(&iadev->timeout_wait, &wait);
spin_lock_irqsave(&iadev->tx_lock, flags);
while((skb = skb_dequeue(&iadev->tx_backlog))) {
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 9c2ba1c97c42..df0c66cb7ad3 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
{
int ret;
- if (init_cache_level(cpu))
+ if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
diff --git a/drivers/base/init.c b/drivers/base/init.c
index da033d3bab3c..48c0e220acc0 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memory.h>
+#include <linux/of.h>
#include "base.h"
@@ -34,4 +35,5 @@ void __init driver_init(void)
cpu_dev_init();
memory_dev_init();
container_dev_init();
+ of_core_init();
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index eb1fed5bd516..3ccef9eba6f9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
config BLK_DEV_PMEM
tristate "Persistent memory block device support"
+ depends on HAS_IOMEM
help
Saying Y here will allow you to use a contiguous range of reserved
memory as one or more persistent block devices.
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 85b8036deaa3..683dff272562 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_iod *iod;
dma_addr_t meta_dma = 0;
void *meta = NULL;
+ void __user *metadata;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
meta_len = 0;
}
+ metadata = (void __user *)(unsigned long)io.metadata;
+
write = io.opcode & 1;
switch (io.opcode) {
@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (meta_len) {
meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
&meta_dma, GFP_KERNEL);
+
if (!meta) {
status = -ENOMEM;
goto unmap;
}
if (write) {
- if (copy_from_user(meta, (void __user *)io.metadata,
- meta_len)) {
+ if (copy_from_user(meta, metadata, meta_len)) {
status = -EFAULT;
goto unmap;
}
@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
nvme_free_iod(dev, iod);
if (meta) {
if (status == NVME_SC_SUCCESS && !write) {
- if (copy_to_user((void __user *)io.metadata, meta,
- meta_len))
+ if (copy_to_user(metadata, meta, meta_len))
status = -EFAULT;
}
dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8dcbced0eafd..6e134f4759c0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
zram->max_comp_streams = 1;
+
set_capacity(zram->disk, 0);
+ part_stat_set_all(&zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index fb9ec6221730..6f047dcb94c2 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -58,7 +58,6 @@
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/syscore_ops.h>
-#include <linux/memblock.h>
/*
* DDR target is the same on all platforms.
@@ -70,6 +69,7 @@
*/
#define WIN_CTRL_OFF 0x0000
#define WIN_CTRL_ENABLE BIT(0)
+/* Only on HW I/O coherency capable platforms */
#define WIN_CTRL_SYNCBARRIER BIT(1)
#define WIN_CTRL_TGT_MASK 0xf0
#define WIN_CTRL_TGT_SHIFT 4
@@ -102,9 +102,7 @@
/* Relative to mbusbridge_base */
#define MBUS_BRIDGE_CTRL_OFF 0x0
-#define MBUS_BRIDGE_SIZE_MASK 0xffff0000
#define MBUS_BRIDGE_BASE_OFF 0x4
-#define MBUS_BRIDGE_BASE_MASK 0xffff0000
/* Maximum number of windows, for all known platforms */
#define MBUS_WINS_MAX 20
@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
(attr << WIN_CTRL_ATTR_SHIFT) |
(target << WIN_CTRL_TGT_SHIFT) |
- WIN_CTRL_SYNCBARRIER |
WIN_CTRL_ENABLE;
+ if (mbus->hw_io_coherency)
+ ctrl |= WIN_CTRL_SYNCBARRIER;
writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
writel(ctrl, addr + WIN_CTRL_OFF);
@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
return MVEBU_MBUS_NO_REMAP;
}
-/*
- * Use the memblock information to find the MBus bridge hole in the
- * physical address space.
- */
-static void __init
-mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
-{
- struct memblock_region *r;
- uint64_t s = 0;
-
- for_each_memblock(memory, r) {
- /*
- * This part of the memory is above 4 GB, so we don't
- * care for the MBus bridge hole.
- */
- if (r->base >= 0x100000000)
- continue;
-
- /*
- * The MBus bridge hole is at the end of the RAM under
- * the 4 GB limit.
- */
- if (r->base + r->size > s)
- s = r->base + r->size;
- }
-
- *start = s;
- *end = 0x100000000;
-}
-
static void __init
mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
{
int i;
int cs;
- uint64_t mbus_bridge_base, mbus_bridge_end;
mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
- mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
-
for (i = 0, cs = 0; i < 4; i++) {
- u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
- u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
- u64 end;
- struct mbus_dram_window *w;
-
- /* Ignore entries that are not enabled */
- if (!(size & DDR_SIZE_ENABLED))
- continue;
-
- /*
- * Ignore entries whose base address is above 2^32,
- * since devices cannot DMA to such high addresses
- */
- if (base & DDR_BASE_CS_HIGH_MASK)
- continue;
-
- base = base & DDR_BASE_CS_LOW_MASK;
- size = (size | ~DDR_SIZE_MASK) + 1;
- end = base + size;
-
- /*
- * Adjust base/size of the current CS to make sure it
- * doesn't overlap with the MBus bridge hole. This is
- * particularly important for devices that do DMA from
- * DRAM to a SRAM mapped in a MBus window, such as the
- * CESA cryptographic engine.
- */
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
/*
- * The CS is fully enclosed inside the MBus bridge
- * area, so ignore it.
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+ * address bits set (devices can only access the first
+ * 32 bits of the memory).
*/
- if (base >= mbus_bridge_base && end <= mbus_bridge_end)
- continue;
+ if ((size & DDR_SIZE_ENABLED) &&
+ !(base & DDR_BASE_CS_HIGH_MASK)) {
+ struct mbus_dram_window *w;
- /*
- * Beginning of CS overlaps with end of MBus, raise CS
- * base address, and shrink its size.
- */
- if (base >= mbus_bridge_base && end > mbus_bridge_end) {
- size -= mbus_bridge_end - base;
- base = mbus_bridge_end;
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base & DDR_BASE_CS_LOW_MASK;
+ w->size = (size | ~DDR_SIZE_MASK) + 1;
}
-
- /*
- * End of CS overlaps with beginning of MBus, shrink
- * CS size.
- */
- if (base < mbus_bridge_base && end > mbus_bridge_base)
- size -= end - mbus_bridge_base;
-
- w = &mvebu_mbus_dram_info.cs[cs++];
- w->cs_index = i;
- w->mbus_attr = 0xf & ~(1 << i);
- if (mbus->hw_io_coherency)
- w->mbus_attr |= ATTR_HW_COHERENCY;
- w->base = base;
- w->size = size;
}
mvebu_mbus_dram_info.num_cs = cs;
}
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 9b84def7a353..f42f71e37e73 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&hsuc->vchan.lock, flags);
hsu_dma_stop_channel(hsuc);
- hsuc->desc = NULL;
+ if (hsuc->desc) {
+ hsu_dma_desc_free(&hsuc->desc->vdesc);
+ hsuc->desc = NULL;
+ }
vchan_get_all_descriptors(&hsuc->vchan, &head);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a7d9d3029b14..340f9e607cd8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
+ pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
spin_lock(&pl330->lock);
_stop(pch->thread);
@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pl330->ddma.dev);
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
return 0;
}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 071c2c969eec..72791232e46b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -186,8 +186,20 @@ struct ibft_kobject {
static struct iscsi_boot_kset *boot_kset;
+/* fully null address */
static const char nulls[16];
+/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
+static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00 };
+
+static int address_not_null(u8 *ip)
+{
+ return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
+}
+
/*
* Helper functions to parse data properly.
*/
@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_IP_ADDR:
- if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr)))
+ if (address_not_null(nic->ip_addr))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type)
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_GATEWAY:
- if (memcmp(nic->gateway, nulls, sizeof(nic->gateway)))
+ if (address_not_null(nic->gateway))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_PRIMARY_DNS:
- if (memcmp(nic->primary_dns, nulls,
- sizeof(nic->primary_dns)))
+ if (address_not_null(nic->primary_dns))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_SECONDARY_DNS:
- if (memcmp(nic->secondary_dns, nulls,
- sizeof(nic->secondary_dns)))
+ if (address_not_null(nic->secondary_dns))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_DHCP:
- if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp)))
+ if (address_not_null(nic->dhcp))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_VLAN:
@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_ISNS_SERVER:
- if (memcmp(init->isns_server, nulls,
- sizeof(init->isns_server)))
+ if (address_not_null(init->isns_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_SLP_SERVER:
- if (memcmp(init->slp_server, nulls,
- sizeof(init->slp_server)))
+ if (address_not_null(init->slp_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
- if (memcmp(init->pri_radius_server, nulls,
- sizeof(init->pri_radius_server)))
+ if (address_not_null(init->pri_radius_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
- if (memcmp(init->sec_radius_server, nulls,
- sizeof(init->sec_radius_server)))
+ if (address_not_null(init->sec_radius_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_INITIATOR_NAME:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e469c4b2e8cc..c25728bc388a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.cpu_core_id_base);
sysfs_show_32bit_prop(buffer, "simd_id_base",
dev->node_props.simd_id_base);
- sysfs_show_32bit_prop(buffer, "capability",
- dev->node_props.capability);
sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
dev->node_props.max_waves_per_simd);
sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
+ sysfs_show_32bit_prop(buffer, "capability",
+ dev->node_props.capability);
}
return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index ffc305fc2076..eb7e61078a5b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return ret ? ret : count;
}
static ssize_t status_show(struct device *device,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 007c7d7d8295..dc55c51964ab 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
+ IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ else if (IS_VALLEYVIEW(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_runtime_pm_put(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 53394f998a1f..851b585987f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev)
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- if (list_empty(&ring->request_list))
- return;
-
WARN_ON(i915_verify_lists(ring->dev));
/* Retire requests first as we use it above for the early return.
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f27346e907b1..d714a4b5711e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_RECEIVE_ERROR))
continue;
if (status & DP_AUX_CH_CTL_DONE)
- break;
+ goto done;
}
- if (status & DP_AUX_CH_CTL_DONE)
- break;
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
+done:
/* Check for timeout or receive error.
* Timeouts occur when the sink is not connected
*/
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 56e437e31580..ae628001fd97 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset;
+ int i = 0, inc, try = 0, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = dev_priv->gpio_mmio_base;
+retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
- for (i = 0; i < num; i++) {
+ for (; i < num; i += inc) {
+ inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
- i += 1; /* set i to the index of the read xfer */
+ inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
@@ -525,6 +527,18 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ /*
+ * Passive adapters sometimes NAK the first probe. Retry the first
+ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+ * has retries internally. See also the retry loop in
+ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+ */
+ if (ret == -ENXIO && i == 0 && try++ == 0) {
+ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
+ goto retry;
+ }
+
goto out;
timeout:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 09df74b8e917..424e62197787 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+ if (ring->status_page.obj) {
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ (u32)ring->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+ }
+
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 441e2502b889..005b5e04de4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
- if (INTEL_REVID(dev) == SKL_REVID_C0 ||
- INTEL_REVID(dev) == SKL_REVID_D0)
- /* WaBarrierPerformanceFixDisable:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
return 0;
}
@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
+
return skl_tune_iz_hashing(ring);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e87d2f418de4..987b81f31b0e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising analog device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e597ffc26563..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
- /* if there is no audio, set MINM_OVER_MAXP */
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
- (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
- drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index f04205170b8a..cfa3a84a2af0 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_SOURCE | /* select SW CTS value */
HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b7ca4c514621..a7fdfa4f0857 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ /*
+ * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
+ * after the CP ring have chew one packet at least. Hence here we stop
+ * and restart DPM after the radeon_ib_ring_tests().
+ */
+ if (rdev->pm.dpm_enabled &&
+ (rdev->pm.pm_method == PM_METHOD_DPM) &&
+ (rdev->family == CHIP_TURKS) &&
+ (rdev->flags & RADEON_IS_MOBILITY)) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_dpm_disable(rdev);
+ radeon_dpm_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index de42fc4a22b8..9c3377ca17b7 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* make sure object fit at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
} else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
}
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto error_unreserve;
}
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_vm_clear_bo(rdev, pt);
if (r) {
radeon_bo_unref(&pt);
- radeon_bo_reserve(bo_va->bo, false);
return r;
}
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
return 0;
+
+error_unreserve:
+ radeon_bo_unreserve(bo_va->bo);
+ return r;
}
/**
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 8fe78d08e01c..7c6966434ee7 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:i2c-hix5hd2");
+MODULE_ALIAS("platform:hix5hd2-i2c");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 958c8db4ec30..297e9c9ac943 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
i2c->quirks = s3c24xx_get_device_quirks(pdev);
+ i2c->sysreg = ERR_PTR(-ENOENT);
if (pdata)
memcpy(i2c->pdata, pdata, sizeof(*pdata));
else
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 89d8aa1d2818..df12c57e6ce0 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
module_platform_driver(twl6030_gpadc_driver);
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 0916bf6b6c31..73b189c1c0fb 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -139,6 +139,7 @@
#define ADIS16400_NO_BURST BIT(1)
#define ADIS16400_HAS_SLOW_MODE BIT(2)
#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
+#define ADIS16400_BURST_DIAG_STAT BIT(4)
struct adis16400_state;
@@ -165,6 +166,7 @@ struct adis16400_state {
int filt_int;
struct adis adis;
+ unsigned long avail_scan_mask[2];
};
/* At the moment triggers are only used for ring buffer
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
index 6e727ffe5262..90c24a23c679 100644
--- a/drivers/iio/imu/adis16400_buffer.c
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
{
struct adis16400_state *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
- uint16_t *tx;
+ unsigned int burst_length;
+ u8 *tx;
if (st->variant->flags & ADIS16400_NO_BURST)
return adis_update_scan_mode(indio_dev, scan_mask);
@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
kfree(adis->xfer);
kfree(adis->buffer);
+ /* All but the timestamp channel */
+ burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
+ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+ burst_length += sizeof(u16);
+
adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
if (!adis->xfer)
return -ENOMEM;
- adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
- GFP_KERNEL);
+ adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
if (!adis->buffer)
return -ENOMEM;
- tx = adis->buffer + indio_dev->scan_bytes;
-
+ tx = adis->buffer + burst_length;
tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
tx[1] = 0;
adis->xfer[0].tx_buf = tx;
adis->xfer[0].bits_per_word = 8;
adis->xfer[0].len = 2;
- adis->xfer[1].tx_buf = tx;
+ adis->xfer[1].rx_buf = adis->buffer;
adis->xfer[1].bits_per_word = 8;
- adis->xfer[1].len = indio_dev->scan_bytes;
+ adis->xfer[1].len = burst_length;
spi_message_init(&adis->msg);
spi_message_add_tail(&adis->xfer[0], &adis->msg);
@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
struct adis16400_state *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
u32 old_speed_hz = st->adis.spi->max_speed_hz;
+ void *buffer;
int ret;
if (!adis->buffer)
@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
spi_setup(st->adis.spi);
}
- iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+ buffer = adis->buffer + sizeof(u16);
+ else
+ buffer = adis->buffer;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index fa795dcd5f75..2fd68f2219a7 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = st->variant->temp_scale_nano / 1000000;
*val2 = (st->variant->temp_scale_nano % 1000000);
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_PRESSURE:
+ /* 20 uBar = 0.002kPascal */
+ *val = 0;
+ *val2 = 2000;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
}
}
-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
- .channel = 0, \
+ .channel = chn, \
.extend_name = name, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
}
#define ADIS16400_SUPPLY_CHAN(addr, bits) \
- ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+ ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
- ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+ ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
.type = IIO_ANGL_VEL, \
@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
.channels = adis16448_channels,
.num_channels = ARRAY_SIZE(adis16448_channels),
.flags = ADIS16400_HAS_PROD_ID |
- ADIS16400_HAS_SERIAL_NUMBER,
+ ADIS16400_HAS_SERIAL_NUMBER |
+ ADIS16400_BURST_DIAG_STAT,
.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
.temp_scale_nano = 73860000, /* 0.07386 C */
@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
.debugfs_reg_access = adis_debugfs_reg_access,
};
-static const unsigned long adis16400_burst_scan_mask[] = {
- ~0UL,
- 0,
-};
-
static const char * const adis16400_status_error_msgs[] = {
[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
BIT(ADIS16400_DIAG_STAT_POWER_LOW),
};
+static void adis16400_setup_chan_mask(struct adis16400_state *st)
+{
+ const struct adis16400_chip_info *chip_info = st->variant;
+ unsigned i;
+
+ for (i = 0; i < chip_info->num_channels; i++) {
+ const struct iio_chan_spec *ch = &chip_info->channels[i];
+
+ if (ch->scan_index >= 0 &&
+ ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
+ st->avail_scan_mask[0] |= BIT(ch->scan_index);
+ }
+}
+
static int adis16400_probe(struct spi_device *spi)
{
struct adis16400_state *st;
@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
indio_dev->info = &adis16400_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (!(st->variant->flags & ADIS16400_NO_BURST))
- indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+ if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+ adis16400_setup_chan_mask(st);
+ indio_dev->available_scan_masks = st->avail_scan_mask;
+ }
ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index ad6a8818608d..bc09b4e1f57c 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,14 +64,6 @@ enum {
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
-/* Counters should be saturate once they reach their maximum value */
-#define ASSIGN_32BIT_COUNTER(counter, value) do {\
- if ((value) > U32_MAX) \
- counter = cpu_to_be32(U32_MAX); \
- else \
- counter = cpu_to_be32(value); \
-} while (0)
-
struct mlx4_mad_rcv_buf {
struct ib_grh grh;
u8 payload[256];
@@ -828,31 +820,25 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int err;
- u32 inmod = dev->counters[port_num - 1] & 0xffff;
- u8 mode;
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return -EINVAL;
- mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
- if (IS_ERR(mailbox))
- return IB_MAD_RESULT_FAILURE;
-
- err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
- MLX4_CMD_WRAPPED);
+ memset(&counter_stats, 0, sizeof(counter_stats));
+ err = mlx4_get_counter_stats(dev->dev,
+ dev->counters[port_num - 1].index,
+ &counter_stats, 0);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
memset(out_mad->data, 0, sizeof out_mad->data);
- mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
- switch (mode & 0xf) {
+ switch (counter_stats.counter_mode & 0xf) {
case 0:
- edit_counter(mailbox->buf,
- (void *)(out_mad->data + 40));
+ edit_counter(&counter_stats,
+ (void *)(out_mad->data + 40));
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
break;
default:
@@ -860,8 +846,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
}
- mlx4_free_cmd_mailbox(dev->dev, mailbox);
-
return err;
}
@@ -869,10 +853,12 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ if (!mlx4_is_slave(dev->dev))
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
case IB_LINK_LAYER_ETHERNET:
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 024b0f745035..b6bd217ab5bd 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2098,6 +2098,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
struct mlx4_ib_iboe *iboe;
int ib_num_ports = 0;
int num_req_counters;
+ int allocated;
+ u32 counter_index;
pr_info_once("%s", mlx4_ib_version);
@@ -2263,19 +2265,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
for (i = 0; i < num_req_counters; ++i) {
mutex_init(&ibdev->qp1_proxy_lock[i]);
+ allocated = 0;
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
- err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+ err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+ /* if failed to allocate a new counter, use default */
if (err)
- ibdev->counters[i] = -1;
- } else {
- ibdev->counters[i] = -1;
+ counter_index =
+ mlx4_get_default_counter_index(dev,
+ i + 1);
+ else
+ allocated = 1;
+ } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
+ counter_index = mlx4_get_default_counter_index(dev,
+ i + 1);
}
+ ibdev->counters[i].index = counter_index;
+ ibdev->counters[i].allocated = allocated;
+ pr_info("counter index %d for port %d allocated %d\n",
+ counter_index, i + 1, allocated);
}
if (mlx4_is_bonded(dev))
- for (i = 1; i < ibdev->num_ports ; ++i)
- ibdev->counters[i] = ibdev->counters[0];
-
+ for (i = 1; i < ibdev->num_ports ; ++i) {
+ ibdev->counters[i].index = ibdev->counters[0].index;
+ ibdev->counters[i].allocated = 0;
+ }
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
@@ -2415,10 +2429,12 @@ err_steer_qp_release:
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
err_counter:
- for (; i; --i)
- if (ibdev->counters[i - 1] != -1)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
-
+ for (i = 0; i < ibdev->num_ports; ++i) {
+ if (ibdev->counters[i].index != -1 &&
+ ibdev->counters[i].allocated)
+ mlx4_counter_free(ibdev->dev,
+ ibdev->counters[i].index);
+ }
err_map:
iounmap(ibdev->uar_map);
@@ -2535,8 +2551,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- if (ibdev->counters[p] != -1)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p].index != -1 &&
+ ibdev->counters[p].allocated)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ef80e6c99a68..b3912d119342 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -503,6 +503,11 @@ struct mlx4_ib_iov_port {
struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};
+struct counter_index {
+ u32 index;
+ u8 allocated;
+};
+
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@@ -521,7 +526,7 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
struct mlx4_ib_iboe iboe;
- int counters[MLX4_MAX_PORTS];
+ struct counter_index counters[MLX4_MAX_PORTS];
int *eq_table;
struct kobject *iov_parent;
struct kobject *ports_parent;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 02fc91c68027..c5a3a5f0de41 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1539,12 +1539,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
- if (dev->counters[qp->port - 1] != -1) {
+ if (dev->counters[qp->port - 1].index != -1) {
context->pri_path.counter_index =
- dev->counters[qp->port - 1];
+ dev->counters[qp->port - 1].index;
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index =
+ MLX4_SINK_COUNTER_INDEX(dev->dev);
if (qp->flags & MLX4_IB_QP_NETIF) {
mlx4_ib_steer_qp_reg(dev, qp, 1);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index f2d9e70818d7..a770490ebbf1 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -137,3 +137,300 @@ out:
kfree(out_mad);
return err;
}
+
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
+{
+ struct ib_smp *in_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ if (!in_mad)
+ return -ENOMEM;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
+ out_mad);
+
+ kfree(in_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(sys_image_guid, out_mad->data + 4, 8);
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_desc, out_mad->data, 64);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_guid, out_mad->data + 12, 8);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+ in_mad->attr_mod = cpu_to_be32(index / 32);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw, out_mad->data + 8, 8);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+ in_mad->attr_mod = cpu_to_be32(index / 8);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int ext_active_speed;
+ int err = -ENOMEM;
+
+ if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ mlx5_ib_warn(dev, "invalid port number %d\n", port);
+ return -EINVAL;
+ }
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ memset(props, 0, sizeof(*props));
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto out;
+ }
+
+ props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
+ props->lmc = out_mad->data[34] & 0x7;
+ props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
+ props->sm_sl = out_mad->data[36] & 0xf;
+ props->state = out_mad->data[32] & 0xf;
+ props->phys_state = out_mad->data[33] >> 4;
+ props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
+ props->gid_tbl_len = out_mad->data[50];
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
+ props->active_width = out_mad->data[31] & 0xf;
+ props->active_speed = out_mad->data[35] >> 4;
+ props->max_mtu = out_mad->data[41] & 0xf;
+ props->active_mtu = out_mad->data[36] >> 4;
+ props->subnet_timeout = out_mad->data[51] & 0x1f;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = out_mad->data[41] >> 4;
+
+ /* Check if extended speeds (EDR/FDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+ ext_active_speed = out_mad->data[62] >> 4;
+
+ switch (ext_active_speed) {
+ case 1:
+ props->active_speed = 16; /* FDR */
+ break;
+ case 2:
+ props->active_speed = 32; /* EDR */
+ break;
+ }
+ }
+
+ /* If the reported active speed is QDR, check if it is FDR-10 */
+ if (props->active_speed == 4) {
+ if (mdev->port_caps[port - 1].ext_port_cap &
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+ init_query_mad(in_mad);
+ in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port,
+ NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ /* Checking LinkSpeedActive for FDR-10 */
+ if (out_mad->data[15] & 0x1)
+ props->active_speed = 8;
+ }
+ }
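	/*
	 * Note: ib_port_attr.active_speed uses the IB encoding SDR=1,
	 * DDR=2, QDR=4, FDR-10=8, FDR=16, EDR=32, which is why the two
	 * fix-ups above map extended speeds to 16/32 and rewrite a
	 * reported QDR (4) to FDR-10 (8) when LinkSpeedActive says so.
	 */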
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+
+ return err;
+}
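/*
 * Illustrative sketch (not in this patch): every helper above follows the
 * same MAD_IFC skeleton; only the attribute id, the attribute modifier and
 * the offset parsed out of out_mad->data differ.  A hypothetical common
 * helper restating the pattern:
 */
static int mlx5_query_mad_ifc_attr(struct mlx5_ib_dev *dev, __be16 attr_id,
				   u32 attr_mod, void *buf, int off, int len)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = attr_id;		/* e.g. IB_SMP_ATTR_NODE_INFO */
	in_mad->attr_mod = cpu_to_be32(attr_mod);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(buf, out_mad->data + off, len);	/* caller fixes endianness */
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}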
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9075649f30fc..79dadd627e9c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -40,6 +40,7 @@
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
+#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
@@ -62,30 +63,168 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+ case MLX5_CAP_PORT_TYPE_IB:
+ return IB_LINK_LAYER_INFINIBAND;
+ case MLX5_CAP_PORT_TYPE_ETH:
+ return IB_LINK_LAYER_ETHERNET;
+ default:
+ return IB_LINK_LAYER_UNSPECIFIED;
+ }
+}
+
+static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+{
+ return !dev->mdev->issi;
+}
+
+enum {
+ MLX5_VPORT_ACCESS_METHOD_MAD,
+ MLX5_VPORT_ACCESS_METHOD_HCA,
+ MLX5_VPORT_ACCESS_METHOD_NIC,
+};
+
+static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+{
+ if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+ return MLX5_VPORT_ACCESS_METHOD_MAD;
+
+ if (mlx5_ib_port_link_layer(ibdev) ==
+ IB_LINK_LAYER_ETHERNET)
+ return MLX5_VPORT_ACCESS_METHOD_NIC;
+
+ return MLX5_VPORT_ACCESS_METHOD_HCA;
+}
+
+static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_system_image_guid(ibdev,
+ sys_image_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ if (!err)
+ *sys_image_guid = cpu_to_be64(tmp);
+ return err;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+ pkey_table_size));
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+ __be64 *node_guid)
+{
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_node_guid(dev, node_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+ if (!err)
+ *node_guid = cpu_to_be64(tmp);
+ return err;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+struct mlx5_reg_node_desc {
+ u8 desc[64];
+};
+
+static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct mlx5_reg_node_desc in;
+
+ if (mlx5_use_mad_ifc(dev))
+ return mlx5_query_mad_ifc_node_desc(dev, node_desc);
+
+ memset(&in, 0, sizeof(in));
+
+ return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+ sizeof(struct mlx5_reg_node_desc),
+ MLX5_REG_NODE_DESC, 0, 0);
+}
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
int max_rq_sg;
int max_sq_sg;
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+ memset(props, 0, sizeof(*props));
+ err = mlx5_query_system_image_guid(ibdev,
+ &props->sys_image_guid);
+ if (err)
+ return err;
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
if (err)
- goto out;
+ return err;
- memset(props, 0, sizeof(*props));
+ err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+ if (err)
+ return err;
props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
(fw_rev_min(dev->mdev) << 16) |
@@ -117,11 +256,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
- 0xffffff;
- props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30));
- props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32));
- memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
+ props->vendor_part_id = mdev->pdev->device;
+ props->hw_ver = mdev->pdev->revision;
props->max_mr_size = ~0ull;
props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -147,7 +283,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_fast_reg_page_list_len = (unsigned int)-1;
props->atomic_cap = IB_ATOMIC_NONE;
props->masked_atomic_cap = IB_ATOMIC_NONE;
- props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -160,175 +295,228 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->odp_caps = dev->odp_caps;
#endif
-out:
- kfree(in_mad);
- kfree(out_mad);
-
- return err;
+ return 0;
}
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props)
+enum mlx5_ib_width {
+ MLX5_IB_WIDTH_1X = 1 << 0,
+ MLX5_IB_WIDTH_2X = 1 << 1,
+ MLX5_IB_WIDTH_4X = 1 << 2,
+ MLX5_IB_WIDTH_8X = 1 << 3,
+ MLX5_IB_WIDTH_12X = 1 << 4
+};
+
+static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+ u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int ext_active_speed;
- int err = -ENOMEM;
-
- if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
- mlx5_ib_warn(dev, "invalid port number %d\n", port);
- return -EINVAL;
+ int err = 0;
+
+ if (active_width & MLX5_IB_WIDTH_1X) {
+ *ib_width = IB_WIDTH_1X;
+ } else if (active_width & MLX5_IB_WIDTH_2X) {
+ mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
+ (int)active_width);
+ err = -EINVAL;
+ } else if (active_width & MLX5_IB_WIDTH_4X) {
+ *ib_width = IB_WIDTH_4X;
+ } else if (active_width & MLX5_IB_WIDTH_8X) {
+ *ib_width = IB_WIDTH_8X;
+ } else if (active_width & MLX5_IB_WIDTH_12X) {
+ *ib_width = IB_WIDTH_12X;
+ } else {
+ mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+ (int)active_width);
+ err = -EINVAL;
}
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- memset(props, 0, sizeof(*props));
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ return err;
+}
- err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
- if (err) {
- mlx5_ib_warn(dev, "err %d\n", err);
- goto out;
+static int mlx5_mtu_to_ib_mtu(int mtu)
+{
+ switch (mtu) {
+ case 256: return 1;
+ case 512: return 2;
+ case 1024: return 3;
+ case 2048: return 4;
+ case 4096: return 5;
+ default:
+ pr_warn("invalid mtu\n");
+ return -1;
}
+}
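/*
 * Note: the table above is just the IB MTU enum, ib_mtu = log2(mtu) - 7;
 * e.g. 2048 -> 11 - 7 = 4 (IB_MTU_2048).  The explicit switch keeps the
 * invalid-value warning rather than trusting an ilog2() computation.
 */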
+enum ib_max_vl_num {
+ __IB_MAX_VL_0 = 1,
+ __IB_MAX_VL_0_1 = 2,
+ __IB_MAX_VL_0_3 = 3,
+ __IB_MAX_VL_0_7 = 4,
+ __IB_MAX_VL_0_14 = 5,
+};
- props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
- props->lmc = out_mad->data[34] & 0x7;
- props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
- props->sm_sl = out_mad->data[36] & 0xf;
- props->state = out_mad->data[32] & 0xf;
- props->phys_state = out_mad->data[33] >> 4;
- props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
- props->gid_tbl_len = out_mad->data[50];
- props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
- props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
- props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
- props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
- props->active_width = out_mad->data[31] & 0xf;
- props->active_speed = out_mad->data[35] >> 4;
- props->max_mtu = out_mad->data[41] & 0xf;
- props->active_mtu = out_mad->data[36] >> 4;
- props->subnet_timeout = out_mad->data[51] & 0x1f;
- props->max_vl_num = out_mad->data[37] >> 4;
- props->init_type_reply = out_mad->data[41] >> 4;
-
- /* Check if extended speeds (EDR/FDR/...) are supported */
- if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
- ext_active_speed = out_mad->data[62] >> 4;
-
- switch (ext_active_speed) {
- case 1:
- props->active_speed = 16; /* FDR */
- break;
- case 2:
- props->active_speed = 32; /* EDR */
- break;
- }
- }
+enum mlx5_vl_hw_cap {
+ MLX5_VL_HW_0 = 1,
+ MLX5_VL_HW_0_1 = 2,
+ MLX5_VL_HW_0_2 = 3,
+ MLX5_VL_HW_0_3 = 4,
+ MLX5_VL_HW_0_4 = 5,
+ MLX5_VL_HW_0_5 = 6,
+ MLX5_VL_HW_0_6 = 7,
+ MLX5_VL_HW_0_7 = 8,
+ MLX5_VL_HW_0_14 = 15
+};
- /* If reported active speed is QDR, check if is FDR-10 */
- if (props->active_speed == 4) {
- if (mdev->port_caps[port - 1].ext_port_cap &
- MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
- init_query_mad(in_mad);
- in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
-
- err = mlx5_MAD_IFC(dev, 1, 1, port,
- NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
-
- /* Checking LinkSpeedActive for FDR-10 */
- if (out_mad->data[15] & 0x1)
- props->active_speed = 8;
- }
- }
+static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
+ u8 *max_vl_num)
+{
+ switch (vl_hw_cap) {
+ case MLX5_VL_HW_0:
+ *max_vl_num = __IB_MAX_VL_0;
+ break;
+ case MLX5_VL_HW_0_1:
+ *max_vl_num = __IB_MAX_VL_0_1;
+ break;
+ case MLX5_VL_HW_0_3:
+ *max_vl_num = __IB_MAX_VL_0_3;
+ break;
+ case MLX5_VL_HW_0_7:
+ *max_vl_num = __IB_MAX_VL_0_7;
+ break;
+ case MLX5_VL_HW_0_14:
+ *max_vl_num = __IB_MAX_VL_0_14;
+ break;
-out:
- kfree(in_mad);
- kfree(out_mad);
+ default:
+ return -EINVAL;
+ }
- return err;
+ return 0;
}
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid)
+static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int max_mtu;
+ int oper_mtu;
+ int err;
+ u8 ib_link_width_oper;
+ u8 vl_hw_cap;
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep) {
+ err = -ENOMEM;
goto out;
+ }
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ memset(props, 0, sizeof(*props));
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
if (err)
goto out;
- memcpy(gid->raw, out_mad->data + 8, 8);
+ props->lid = rep->lid;
+ props->lmc = rep->lmc;
+ props->sm_lid = rep->sm_lid;
+ props->sm_sl = rep->sm_sl;
+ props->state = rep->vport_state;
+ props->phys_state = rep->port_physical_state;
+ props->port_cap_flags = rep->cap_mask1;
+ props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
+ props->bad_pkey_cntr = rep->pkey_violation_counter;
+ props->qkey_viol_cntr = rep->qkey_violation_counter;
+ props->subnet_timeout = rep->subnet_timeout;
+ props->init_type_reply = rep->init_type_reply;
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
- in_mad->attr_mod = cpu_to_be32(index / 8);
+ err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
+ if (err)
+ goto out;
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ err = translate_active_width(ibdev, ib_link_width_oper,
+ &props->active_width);
+ if (err)
+ goto out;
+ err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
+ port);
if (err)
goto out;
- memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+ mlx5_query_port_max_mtu(mdev, &max_mtu, port);
+
+ props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
+
+ mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
+
+ props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
+ err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
+ if (err)
+ goto out;
+
+ err = translate_max_vl_num(ibdev, vl_hw_cap,
+ &props->max_vl_num);
out:
- kfree(in_mad);
- kfree(out_mad);
+ kfree(rep);
return err;
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_port(ibdev, port, props);
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_hca_port(ibdev, port, props);
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
- in_mad->attr_mod = cpu_to_be32(index / 32);
+ default:
+ return -EINVAL;
+ }
+}
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
- *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
+
+ default:
+ return -EINVAL;
+ }
-out:
- kfree(in_mad);
- kfree(out_mad);
- return err;
}
-struct mlx5_reg_node_desc {
- u8 desc[64];
-};
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
+ pkey);
+ default:
+ return -EINVAL;
+ }
+}
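/*
 * Illustrative sketch (not in this patch): every query in this file now
 * dispatches on mlx5_get_vport_access_method() -- MAD_IFC while the device
 * still runs the pre-ISSI command interface (mdev->issi == 0), HCA vport
 * commands for IB ports, NIC vport commands for Ethernet.  A hypothetical
 * new query would take the same shape:
 */
static int mlx5_query_foo(struct ib_device *ibdev, u32 *foo)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return -EOPNOTSUPP;	/* would call a mad_ifc helper */
	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return -EOPNOTSUPP;	/* would call a vport command */
	default:
		return -EINVAL;
	}
}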
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
@@ -727,37 +915,15 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int init_node_data(struct mlx5_ib_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
-
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
-
- err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
-
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
-
- in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+ int err;
- err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
if (err)
- goto out;
+ return err;
- dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
- memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+ dev->mdev->rev_id = dev->mdev->pdev->revision;
-out:
- kfree(in_mad);
- kfree(out_mad);
- return err;
+ return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
@@ -1154,8 +1320,29 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_BASIC;
+ devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
+ if (IS_ERR(devr->s1)) {
+ ret = PTR_ERR(devr->s1);
+ goto error5;
+ }
+ devr->s1->device = &dev->ib_dev;
+ devr->s1->pd = devr->p0;
+ devr->s1->uobject = NULL;
+ devr->s1->event_handler = NULL;
+ devr->s1->srq_context = NULL;
+ devr->s1->srq_type = IB_SRQT_BASIC;
+ devr->s1->ext.xrc.cq = devr->c0;
+ atomic_inc(&devr->p0->usecnt);
+ atomic_set(&devr->s1->usecnt, 0);
+
return 0;
+error5:
+ mlx5_ib_destroy_srq(devr->s0);
error4:
mlx5_ib_dealloc_xrcd(devr->x1);
error3:
@@ -1170,6 +1357,7 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
+ mlx5_ib_destroy_srq(devr->s1);
mlx5_ib_destroy_srq(devr->s0);
mlx5_ib_dealloc_xrcd(devr->x0);
mlx5_ib_dealloc_xrcd(devr->x1);
@@ -1183,6 +1371,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
int err;
int i;
+ /* don't create IB instance over Eth ports, no RoCE yet! */
+ if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
+ return NULL;
+
printk_once(KERN_INFO "%s", mlx5_version);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -1195,7 +1387,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_dealloc;
- get_ext_port_caps(dev);
+ if (mlx5_use_mad_ifc(dev))
+ get_ext_port_caps(dev);
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 0c441add0464..873dc354766a 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -415,6 +415,7 @@ struct mlx5_ib_resources {
struct ib_xrcd *x1;
struct ib_pd *p0;
struct ib_srq *s0;
+ struct ib_srq *s1;
};
struct mlx5_ib_dev {
@@ -594,6 +595,22 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad);
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid);
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys);
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id);
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 15fd485d1ad9..203c8a45e095 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1012,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
} else {
in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ in->ctx.rq_type_srqn |=
+ cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
}
}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index e8e8e942fa4a..e008505e96e9 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -302,7 +302,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
- err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
+ err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
kvfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7752bd59d4b7..a353b7de6d22 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
right = (packet[1] & 0x02) >> 1;
middle = (packet[1] & 0x04) >> 2;
- /* Divide 2 since trackpoint's speed is too fast */
- input_report_rel(dev2, REL_X, (char)x / 2);
- input_report_rel(dev2, REL_Y, -((char)y / 2));
+ input_report_rel(dev2, REL_X, (char)x);
+ input_report_rel(dev2, REL_Y, -((char)y));
input_report_key(dev2, BTN_LEFT, left);
input_report_key(dev2, BTN_RIGHT, right);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 79363b687195..ce3d40004458 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
return true;
/*
- * Some models have a revision higher then 20. Meaning param[2] may
- * be 10 or 20, skip the rates check for these.
+ * Some hw_version >= 4 models have a revision higher than 20, meaning
+ * that param[2] may be 10 or 20; skip the rates check for these.
*/
- if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+ if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
+ param[2] < 40)
return true;
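	/*
	 * Worked example: param = { 0x46, 0x0f, 0x14 } takes this early
	 * return (0x46 & 0x0f = 6 >= 6, 0x0f & 0xaf = 0x0f, 20 < 40), so a
	 * sample rate of 20 no longer fails the rates[] check below.
	 */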
for (i = 0; i < ARRAY_SIZE(rates); i++)
@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 9:
case 10:
case 13:
+ case 14:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 630af73e98c4..35c8d0ceabee 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -151,6 +151,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1024, 5112, 2024, 4832
},
{
+ (const char * const []){"LEN2000", NULL},
+ {ANY_BOARD_ID, ANY_BOARD_ID},
+ 1024, 5113, 2021, 4832
+ },
+ {
(const char * const []){"LEN2001", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
1024, 5022, 2508, 4832
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0045",
"LEN0047",
"LEN0049",
- "LEN2000",
+ "LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
"LEN2003",
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e43d48956dea..e1c7e9e51045 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+ flag |= __GFP_ZERO;
page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
if (!page) {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 68d43beccb7e..5ecfaf29933a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations — the ones with
+ * PASID support on bit 28 — have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable supported super page\n");
intel_iommu_superpage = 0;
+ } else if (!strncmp(str, "ecs_off", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: disable extended context table support\n");
+ intel_iommu_ecs = 0;
}
str += strcspn(str, ",");
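/*
 * Note: like the other tokens handled above, this is a comma-separated
 * option of the intel_iommu= kernel parameter; e.g. booting with
 * "intel_iommu=on,ecs_off" keeps the IOMMU enabled but forces the legacy
 * root/context table format.
 */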
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
- if (ecap_ecs(iommu->ecap)) {
+ if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
@@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
return &context[devfn];
}
+static int iommu_dummy(struct device *dev)
+{
+ return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
u16 segment = 0;
int i;
+ if (iommu_dummy(dev))
+ return NULL;
+
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
segment = pci_domain_nr(pdev->bus);
@@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
if (context)
free_pgtable_page(context);
- if (!ecap_ecs(iommu->ecap))
+ if (!ecs_enabled(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
- if (ecap_ecs(iommu->ecap))
+ if (ecs_enabled(iommu))
addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
@@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
return __get_valid_domain_for_dev(dev);
}
-static int iommu_dummy(struct device *dev)
-{
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ead0a4fb7448..a0fdbc074b98 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -267,8 +267,8 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
}
// The descriptor is wrong for some early samples of the ST5481 chip
- altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
- altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
+ altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32);
+ altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32);
// Use alternative setting 3 on interface 0 to have 2B+D
if ((status = usb_set_interface(dev, 0, 3)) < 0) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 27506302eb7a..4dbed4a67aaf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
}
spin_unlock(&mddev->lock);
- return err;
+ return err ?: len;
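		/* GNU ?: extension -- "err ?: len" is err when err is
		 * nonzero, otherwise len, so a successful store still
		 * reports the whole write as consumed (sysfs stores must
		 * return the byte count on success). */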
}
err = mddev_lock(mddev);
if (err)
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- flush_workqueue(md_misc_wq);
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- if (mddev_lock(mddev) == 0) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+ mddev_lock(mddev) == 0) {
+ flush_workqueue(md_misc_wq);
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
- mddev_unlock(mddev);
}
+ mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e793ab6b3570..f55c3f35b746 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 553d54b87052..b6793d2e051f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2268438f3f63..19eb990d398c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3059,8 +3059,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
return false;
iph = ip_hdr(skb);
- fk->addrs.src = iph->saddr;
- fk->addrs.dst = iph->daddr;
+ iph_to_flow_copy_v4addrs(fk, iph);
noff += iph->ihl << 2;
if (!ip_is_fragment(iph))
proto = iph->protocol;
@@ -3068,8 +3067,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
return false;
iph6 = ipv6_hdr(skb);
- fk->addrs.src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
- fk->addrs.dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+ iph_to_flow_copy_v6addrs(fk, iph6);
noff += sizeof(*iph6);
proto = iph6->nexthdr;
} else {
@@ -3103,7 +3101,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
hash = bond_eth_hash(skb);
else
hash = (__force u32)flow.ports.ports;
- hash ^= (__force u32)flow.addrs.dst ^ (__force u32)flow.addrs.src;
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f7015eb4f8db..5580fcde738f 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -28,6 +28,8 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
0;
}
@@ -56,12 +58,23 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
+ const struct port *ad_port;
+ ad_port = &SLAVE_AD_INFO(slave)->port;
agg = SLAVE_AD_INFO(slave)->port.aggregator;
- if (agg)
+ if (agg) {
if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
agg->aggregator_identifier))
goto nla_put_failure;
+ if (nla_put_u8(skb,
+ IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
+ ad_port->actor_oper_port_state))
+ goto nla_put_failure;
+ if (nla_put_u16(skb,
+ IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+ ad_port->partner_oper.port_state))
+ goto nla_put_failure;
+ }
}
return 0;
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 23618a831612..7d16c51e6913 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -80,6 +80,36 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
}
static SLAVE_ATTR_RO(ad_aggregator_id);
+static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
+{
+ const struct port *ad_port;
+
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+ ad_port = &SLAVE_AD_INFO(slave)->port;
+ if (ad_port->aggregator)
+ return sprintf(buf, "%u\n",
+ ad_port->actor_oper_port_state);
+ }
+
+ return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_actor_oper_port_state);
+
+static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
+{
+ const struct port *ad_port;
+
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+ ad_port = &SLAVE_AD_INFO(slave)->port;
+ if (ad_port->aggregator)
+ return sprintf(buf, "%u\n",
+ ad_port->partner_oper.port_state);
+ }
+
+ return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_partner_oper_port_state);
+
static const struct slave_attribute *slave_attrs[] = {
&slave_attr_state,
&slave_attr_mii_status,
@@ -87,6 +117,8 @@ static const struct slave_attribute *slave_attrs[] = {
&slave_attr_perm_hwaddr,
&slave_attr_queue_id,
&slave_attr_ad_aggregator_id,
+ &slave_attr_ad_actor_oper_port_state,
+ &slave_attr_ad_partner_oper_port_state,
NULL
};
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bf63fee4e743..c1a95a34d62e 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -190,10 +190,11 @@
#define RXBEID0_OFF 4
#define RXBDLC_OFF 5
#define RXBDAT_OFF 6
-#define RXFSIDH(n) ((n) * 4)
-#define RXFSIDL(n) ((n) * 4 + 1)
-#define RXFEID8(n) ((n) * 4 + 2)
-#define RXFEID0(n) ((n) * 4 + 3)
+#define RXFSID(n) ((n < 3) ? 0 : 4)
+#define RXFSIDH(n) ((n) * 4 + RXFSID(n))
+#define RXFSIDL(n) ((n) * 4 + 1 + RXFSID(n))
+#define RXFEID8(n) ((n) * 4 + 2 + RXFSID(n))
+#define RXFEID0(n) ((n) * 4 + 3 + RXFSID(n))
#define RXMSIDH(n) ((n) * 4 + 0x20)
#define RXMSIDL(n) ((n) * 4 + 0x21)
#define RXMEID8(n) ((n) * 4 + 0x22)
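/*
 * Worked example for the RXFSID() fix: on the MCP2515 the acceptance
 * filters are not contiguous; RXF0..RXF2 start at 0x00 but RXF3..RXF5
 * start at 0x10.  RXFSIDH(2) = 2*4 + 0 = 0x08, while RXFSIDH(3) =
 * 3*4 + 4 = 0x10; the old macros wrongly yielded 0x0C for filter 3.
 */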
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 103fde3da476..972982f8bea7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -24,6 +24,7 @@
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
+#include <linux/brcmphy.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -697,7 +698,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* Include the pseudo-PHY address and the broadcast PHY address to
* divert reads towards our workaround
*/
- ds->phys_mii_mask |= ((1 << 30) | (1 << 0));
+ ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
@@ -782,7 +783,7 @@ static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
*/
switch (addr) {
case 0:
- case 30:
+ case BRCM_PSEUDO_PHY_ADDR:
return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
default:
return 0xffff;
@@ -797,7 +798,7 @@ static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
*/
switch (addr) {
case 0:
- case 30:
+ case BRCM_PSEUDO_PHY_ADDR:
bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
break;
}
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 7fba330ce702..39530fa142b0 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -703,8 +703,8 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
u32 high = 0;
if (s->reg >= 0x100) {
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100);
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100);
if (ret < 0)
goto error;
low = ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c75204909702..1e9c28d19ef8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -424,7 +424,7 @@ static void xgbe_tx_timer(unsigned long data)
if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
if (pdata->per_channel_irq)
- disable_irq(channel->dma_irq);
+ disable_irq_nosync(channel->dma_irq);
else
xgbe_disable_rx_tx_ints(pdata);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 77363d680532..a3b1c07ae0af 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2464,6 +2464,7 @@ err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
err_out_free_dev:
+ netif_napi_del(&bp->napi);
free_netdev(dev);
out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus);
+ netif_napi_del(&bp->napi);
free_netdev(dev);
ssb_pcihost_set_power_state(sdev, PCI_D3hot);
ssb_set_drvdata(sdev, NULL);
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 3e9c3fc7591b..65d88d7c5581 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -1,6 +1,8 @@
#ifndef _B44_H
#define _B44_H
+#include <linux/brcmphy.h>
+
/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
#define B44_DEVCTRL 0x0000UL /* Device Control */
#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
@@ -281,8 +283,10 @@ struct ring_info {
};
#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
-#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+/* no local phy regs, e.g.: Broadcom switches' pseudo-PHY */
+#define B44_PHY_ADDR_NO_LOCAL_PHY BRCM_PSEUDO_PHY_ADDR
+/* no phy present at all */
+#define B44_PHY_ADDR_NO_PHY 31
#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index db27febbb215..4fbb093e0d84 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -13,6 +13,7 @@
dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
#include <linux/netdevice.h>
#define BGMAC_DEV_CTL 0x000
@@ -349,7 +350,7 @@
#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */
#define BGMAC_DESC_CTL1_LEN 0x00001FFF
-#define BGMAC_PHY_NOREGS 0x1E
+#define BGMAC_PHY_NOREGS BRCM_PSEUDO_PHY_ADDR
#define BGMAC_PHY_MASK 0x1F
#define BGMAC_MAX_TX_RINGS 4
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6043734ea613..b43b2cb9b830 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2770,12 +2770,79 @@ static int bcmgenet_close(struct net_device *dev)
return ret;
}
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = ring->priv;
+ u32 p_index, c_index, intsts, intmsk;
+ struct netdev_queue *txq;
+ unsigned int free_bds;
+ unsigned long flags;
+ bool txq_stopped;
+
+ if (!netif_msg_tx_err(priv))
+ return;
+
+ txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->index == DESC_INDEX) {
+ intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+ } else {
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
+ }
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+ p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+ txq_stopped = netif_tx_queue_stopped(txq);
+ free_bds = ring->free_bds;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+ "TX queue status: %s, interrupts: %s\n"
+ "(sw)free_bds: %d (sw)size: %d\n"
+ "(sw)p_index: %d (hw)p_index: %d\n"
+ "(sw)c_index: %d (hw)c_index: %d\n"
+ "(sw)clean_p: %d (sw)write_p: %d\n"
+ "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+ ring->index, ring->queue,
+ txq_stopped ? "stopped" : "active",
+ intsts & intmsk ? "enabled" : "disabled",
+ free_bds, ring->size,
+ ring->prod_index, p_index & DMA_P_INDEX_MASK,
+ ring->c_index, c_index & DMA_C_INDEX_MASK,
+ ring->clean_ptr, ring->write_ptr,
+ ring->cb_ptr, ring->end_ptr);
+}
+
static void bcmgenet_timeout(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 int0_enable = 0;
+ u32 int1_enable = 0;
+ unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+ bcmgenet_disable_tx_napi(priv);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+ bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+ bcmgenet_tx_reclaim_all(dev);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ int1_enable |= (1 << q);
+
+ int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+ /* Re-enable TX interrupts if disabled */
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+ bcmgenet_enable_tx_napi(priv);
+
dev->trans_start = jiffies;
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e7651b3c6c57..6bef04e2f735 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -47,7 +47,12 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
HZ / 100);
ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
- if (ret & MDIO_READ_FAIL)
+ /* Some broken devices are known not to release the line during
+ * turn-around, e.g.: Broadcom BCM53125 external switches, so check for
+ * that condition here and ignore the MDIO controller read failure
+ * indication.
+ */
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL))
return -EIO;
return ret & 0xffff;
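/*
 * Illustrative sketch (not in this patch): a bus driver opts a known-broken
 * PHY out of the turn-around check by setting the corresponding bit in the
 * mii_bus mask before registration, e.g.:
 *
 *	bus->phy_ignore_ta_mask |= 1 << broken_phy_addr;
 *	err = mdiobus_register(bus);
 */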
@@ -299,9 +304,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
phy_name = "external RGMII (no delay)";
else
phy_name = "external RGMII (TX delay)";
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg |= RGMII_MODE_EN | id_mode_dis;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
bcmgenet_sys_writel(priv,
PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
break;
@@ -310,6 +312,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
return -EINVAL;
}
+ /* This is an external PHY (xMII), so we need to enable the RGMII
+ * block for the interface to work
+ */
+ if (priv->ext_phy) {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg |= RGMII_MODE_EN | id_mode_dis;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 6e10b99733a2..8584abcf5366 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -9,5 +9,3 @@ obj-$(CONFIG_BNA) += bna.o
bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
-
-EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index cf9f3956f198..95bc8b644a5d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -282,7 +282,6 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;
bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
- bfa_q_qe_init(&cee->ioc_notify);
bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index af25d8e8fae0..1d11d666d408 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -28,19 +28,6 @@
typedef void (*bfa_sm_t)(void *sm, int event);
-/* oc - object class eg. bfa_ioc
- * st - state, eg. reset
- * otype - object type, eg. struct bfa_ioc
- * etype - object type, eg. enum ioc_event
- */
-#define bfa_sm_state_decl(oc, st, otype, etype) \
- static void oc ## _sm_ ## st(otype * fsm, etype event)
-
-#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
-#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
-#define bfa_sm_get_state(_sm) ((_sm)->sm)
-#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-
/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
@@ -67,7 +54,6 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 3bfd9da92630..d152b3fa6c54 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -24,7 +24,6 @@
#include "bfa_defs_status.h"
#include "bfa_defs_mfg_comm.h"
-#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
/* ---------------------- adapter definitions ------------ */
@@ -55,7 +54,7 @@ struct bfa_adapter_attr {
char optrom_ver[BFA_VERSION_LEN];
char os_type[BFA_ADAPTER_OS_TYPE_LEN];
struct bfa_mfg_vpd vpd;
- struct mac mac;
+ u8 mac[ETH_ALEN];
u8 nports;
u8 max_speed;
@@ -187,8 +186,6 @@ enum {
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
-#pragma pack(1)
-
/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
@@ -211,7 +208,7 @@ struct bfa_mfg_block {
char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /* base mac address */
+ u8 mfg_mac[ETH_ALEN]; /* base mac address */
u8 num_mac; /* number of mac addresses */
u8 rsv2;
u32 card_type; /* card type */
@@ -227,9 +224,7 @@ struct bfa_mfg_block {
char initial_mode[8]; /* initial mode: hba/cna/nic */
u8 rsv4[84];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
-};
-
-#pragma pack()
+} __packed;
/* ---------------------- pci definitions ------------ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index a37326d44fbb..f048887cbb81 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -109,8 +109,6 @@ union bfa_port_stats_u {
struct bfa_port_eth_stats eth;
};
-#pragma pack(1)
-
#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
#define BFA_CEE_DCBX_MAX_PRIORITY (8)
#define BFA_CEE_DCBX_MAX_PGID (8)
@@ -133,7 +131,7 @@ struct bfa_cee_lldp_str {
u8 len;
u8 rsvd[2];
u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
-};
+} __packed;
/* LLDP parameters */
struct bfa_cee_lldp_cfg {
@@ -145,7 +143,7 @@ struct bfa_cee_lldp_cfg {
struct bfa_cee_lldp_str mgmt_addr;
u16 time_to_live;
u16 enabled_system_cap;
-};
+} __packed;
enum bfa_cee_dcbx_version {
DCBX_PROTOCOL_PRECEE = 1,
@@ -171,7 +169,7 @@ struct bfa_cee_dcbx_cfg {
u8 lls_fcoe; /* FCoE Logical Link Status */
u8 lls_lan; /* LAN Logical Link Status */
u8 rsvd[2];
-};
+} __packed;
/* CEE status */
/* Making this to tri-state for the benefit of port list command */
@@ -188,11 +186,11 @@ struct bfa_cee_attr {
u8 error_reason;
struct bfa_cee_lldp_cfg lldp_remote;
struct bfa_cee_dcbx_cfg dcbx_remote;
- mac_t src_mac;
+ u8 src_mac[ETH_ALEN];
u8 link_speed;
u8 nw_priority;
u8 filler[2];
-};
+} __packed;
/* LLDP/DCBX/CEE Statistics */
struct bfa_cee_stats {
@@ -214,8 +212,6 @@ struct bfa_cee_stats {
u32 cee_status_up; /*!< CEE status up */
u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 7a45cd0b594d..7e17451c94d1 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -59,8 +59,6 @@ enum {
BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
};
-#pragma pack(1)
-
/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
@@ -77,7 +75,7 @@ enum {
CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
- CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+ CB_GPIO_PROTO = BIT(7) /*!< 8G 2port FC prototypes */
};
#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
@@ -86,7 +84,7 @@ do { \
(prop) |= BFI_ADAPTER_PROTO; \
(gpio) &= ~CB_GPIO_PROTO; \
} \
- switch ((gpio)) { \
+ switch (gpio) { \
case CB_GPIO_TTV: \
(prop) |= BFI_ADAPTER_TTV; \
case CB_GPIO_DFLY: \
@@ -148,8 +146,6 @@ struct bfa_mfg_vpd {
u8 len; /*!< vpd data length excluding header */
u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFA_DEFS_MFG_H__ */
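BIT(n) is the kernel helper from <linux/bitops.h>, defined as (1UL << (n)), so the result is unsigned long rather than signed int. A short illustration; the EXAMPLE_* names are made up, but the second define mirrors BNA_CQ_EF_EOP further down, where the unsigned type matters because 1 << 31 overflows a 32-bit signed int:

#include <linux/bitops.h>

#define EXAMPLE_PROTO_FLAG	BIT(7)	/* == 0x80, same value as (1 << 7) */
#define EXAMPLE_EOP_FLAG	BIT(31)	/* well-defined; (1 << 31) is not */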
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 68f3c13c9ef6..b7a0f7879de2 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -23,14 +23,6 @@
/* IOC local definitions */
-#define bfa_ioc_state_disabled(__sm) \
- (((__sm) == BFI_IOC_UNINIT) || \
- ((__sm) == BFI_IOC_INITING) || \
- ((__sm) == BFI_IOC_HWINIT) || \
- ((__sm) == BFI_IOC_DISABLED) || \
- ((__sm) == BFI_IOC_FAIL) || \
- ((__sm) == BFI_IOC_CFG_DISABLED))
-
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -57,12 +49,6 @@
((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
-#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
- ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
-
-#define bfa_ioc_mbox_cmd_pending(__ioc) \
- (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
- readl((__ioc)->ioc_regs.hfn_mbox_cmd))
static bool bfa_nw_auto_recover = true;
@@ -1105,12 +1091,9 @@ static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
struct bfa_ioc_notify *notify;
- struct list_head *qe;
- list_for_each(qe, &ioc->notify_q) {
- notify = (struct bfa_ioc_notify *)qe;
+ list_for_each_entry(notify, &ioc->notify_q, qe)
notify->cbfn(notify->cbarg, event);
- }
}
static void
@@ -1321,7 +1304,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
i++) {
fwsig[i] =
- swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ swab32(readl(loff + ioc->ioc_regs.smem_page_start));
loff += sizeof(u32);
}
}
@@ -1387,7 +1370,7 @@ static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
- if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
return BFI_IOC_IMG_VER_INCOMP;
if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
@@ -1398,7 +1381,7 @@ bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
/* GA takes priority over internal builds of the same patch stream.
* At this point major minor maint and patch numbers are same.
*/
- if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(base_fwhdr))
if (fwhdr_is_ga(fwhdr_to_cmp))
return BFI_IOC_IMG_VER_SAME;
else
@@ -1692,7 +1675,7 @@ bfa_raw_sem_get(void __iomem *bar)
{
int locked;
- locked = readl((bar + FLASH_SEM_LOCK_REG));
+ locked = readl(bar + FLASH_SEM_LOCK_REG);
return !locked;
}
@@ -1912,10 +1895,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
}
void
-bfa_nw_ioc_timeout(void *ioc_arg)
+bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
@@ -1980,10 +1961,9 @@ bfa_ioc_send_getattr(struct bfa_ioc *ioc)
}
void
-bfa_nw_ioc_hb_check(void *cbarg)
+bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = cbarg;
- u32 hb_count;
+ u32 hb_count;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
@@ -2069,8 +2049,8 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/**
* write smem
*/
- writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
- ((ioc->ioc_regs.smem_page_start) + (loff)));
+ writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
+ ioc->ioc_regs.smem_page_start + loff);
loff += sizeof(u32);
@@ -2177,7 +2157,8 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
/**
* Enqueue command to firmware.
*/
- bfa_q_deq(&mod->cmd_q, &cmd);
+ cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+ list_del(&cmd->qe);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
/**
@@ -2198,8 +2179,10 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
- while (!list_empty(&mod->cmd_q))
- bfa_q_deq(&mod->cmd_q, &cmd);
+ while (!list_empty(&mod->cmd_q)) {
+ cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+ list_del(&cmd->qe);
+ }
}
/**
@@ -2223,14 +2206,14 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
- if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
return 1;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32);
for (i = 0; i < len; i++) {
- r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
@@ -2278,7 +2261,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
int tlen;
if (ioc->dbg_fwsave_once) {
- ioc->dbg_fwsave_once = 0;
+ ioc->dbg_fwsave_once = false;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
@@ -2796,7 +2779,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
ad_attr->prototype = 0;
ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
- ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
+ bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -2942,10 +2925,10 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
return ioc->attr->pwwn;
}
-mac_t
-bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+void
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
- return ioc->attr->mac;
+ ether_addr_copy(mac, ioc->attr->mac);
}
/* Firmware failure detected. Start recovery actions. */
@@ -2997,9 +2980,8 @@ bfa_iocpf_stop(struct bfa_ioc *ioc)
}
void
-bfa_nw_iocpf_timeout(void *ioc_arg)
+bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
enum bfa_iocpf_state iocpf_st;
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
@@ -3011,10 +2993,8 @@ bfa_nw_iocpf_timeout(void *ioc_arg)
}
void
-bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
bfa_ioc_hw_sem_get(ioc);
}
@@ -3245,7 +3225,6 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
flash->op_busy = 0;
bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
- bfa_q_qe_init(&flash->ioc_notify);
bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
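The bfa_q_deq() call sites above are open-coded with the standard <linux/list.h> primitives. A self-contained sketch of the same dequeue shape; struct item stands in for struct bfa_mbox_cmd:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head qe;
	u32 payload;
};

/* Pop the head entry; list_first_entry() is only valid on a non-empty
 * list, hence the explicit check. */
static struct item *item_dequeue(struct list_head *head)
{
	struct item *it;

	if (list_empty(head))
		return NULL;
	it = list_first_entry(head, struct item, qe);
	list_del(&it->qe);
	return it;
}

list_for_each_entry(), used in bfa_ioc_event_notify() above, likewise removes the cast-from-list_head that the old list_for_each() loop needed.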
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index effb7156e7a4..2c0b4c076355 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -232,12 +232,6 @@ struct bfa_ioc_hwif {
#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
#define bfa_ioc_is_default(__ioc) \
(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
-#define bfa_ioc_fetch_stats(__ioc, __stats) \
- (((__stats)->drv_stats) = (__ioc)->stats)
-#define bfa_ioc_clr_stats(__ioc) \
- memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
-#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
-#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc) \
@@ -268,13 +262,6 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
(__ioc)->asic_mode))
-#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
- if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
- ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
-} while (0)
-#define bfa_ioc_ownership_reset(__ioc) \
- ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
-
#define bfa_ioc_lpu_read_stat(__ioc) do { \
if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
@@ -309,7 +296,7 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
-mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac);
void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
@@ -317,10 +304,10 @@ int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
*/
-void bfa_nw_ioc_timeout(void *ioc);
-void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_nw_iocpf_timeout(void *ioc);
-void bfa_nw_iocpf_sem_timeout(void *ioc);
+void bfa_nw_ioc_timeout(struct bfa_ioc *ioc);
+void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc);
/*
* F/W Image Size & Chunk
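The prototype changes above drop the void * indirection from the timeout entry points. A hedged sketch of the difference; both functions are illustrative, only the pattern matches the driver:

struct bfa_ioc;				/* opaque for this sketch */

/* old shape: any pointer compiles, the callee casts blindly */
static void timeout_untyped(void *arg)
{
	struct bfa_ioc *ioc = arg;
	(void)ioc;
}

/* new shape, as adopted above: every call site is type-checked */
static void timeout_typed(struct bfa_ioc *ioc)
{
	(void)ioc;
}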
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 2e72445dbb4f..74e5ed55ac01 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -23,8 +23,7 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
-#define bfa_ioc_ct_sync_pos(__ioc) \
- ((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define bfa_ioc_ct_sync_pos(__ioc) BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
@@ -536,7 +535,7 @@ bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
- writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+ writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}
static bool
@@ -667,7 +666,7 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
}
- r32 = readl((rb + PSS_CTL_REG));
+ r32 = readl(rb + PSS_CTL_REG);
r32 &= ~__PSS_LMEM_RESET;
writel(r32, (rb + PSS_CTL_REG));
udelay(1000);
@@ -678,7 +677,7 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
udelay(1000);
- r32 = readl((rb + MBIST_STAT_REG));
+ r32 = readl(rb + MBIST_STAT_REG);
writel(0, (rb + MBIST_CTL_REG));
return BFA_STATUS_OK;
}
@@ -691,7 +690,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
/*
* put s_clk PLL and PLL FSM in reset
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
__APP_PLL_SCLK_LOGIC_SOFT_RESET);
@@ -701,28 +700,28 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
* Ignore mode and program for the max clock (which is FC16)
* Firmware/NFC will do the PLL init appropriately
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* while doing PLL init don't clock gate ethernet subsystem
*/
- r32 = readl((rb + CT2_CHIP_MISC_PRG));
- writel((r32 | __ETH_CLK_ENABLE_PORT0),
- (rb + CT2_CHIP_MISC_PRG));
+ r32 = readl(rb + CT2_CHIP_MISC_PRG);
+ writel(r32 | __ETH_CLK_ENABLE_PORT0,
+ rb + CT2_CHIP_MISC_PRG);
- r32 = readl((rb + CT2_PCIE_MISC_REG));
- writel((r32 | __ETH_CLK_ENABLE_PORT1),
- (rb + CT2_PCIE_MISC_REG));
+ r32 = readl(rb + CT2_PCIE_MISC_REG);
+ writel(r32 | __ETH_CLK_ENABLE_PORT1,
+ rb + CT2_PCIE_MISC_REG);
/*
* set sclk value
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
__APP_PLL_SCLK_CLK_DIV2);
- writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);
/*
* poll for s_clk lock or delay 1ms
@@ -743,28 +742,28 @@ bfa_ioc_ct2_lclk_init(void __iomem *rb)
/*
* put l_clk PLL and PLL FSM in reset
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
__APP_PLL_LCLK_LOGIC_SOFT_RESET);
- writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
/*
* set LPU speed (set for FC16 which will work for other modes)
*/
- r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ r32 = readl(rb + CT2_CHIP_MISC_PRG);
writel(r32, (rb + CT2_CHIP_MISC_PRG));
/*
* set LPU half speed (set for FC16 which will work for other modes)
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
- writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
/*
* set lclk for mode (set for FC16)
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
r32 |= 0x20c1731b;
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
@@ -780,14 +779,14 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
{
u32 r32;
- r32 = readl((rb + PSS_CTL_REG));
+ r32 = readl(rb + PSS_CTL_REG);
r32 &= ~__PSS_LMEM_RESET;
- writel(r32, (rb + PSS_CTL_REG));
+ writel(r32, rb + PSS_CTL_REG);
udelay(1000);
- writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+ writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
udelay(1000);
- writel(0, (rb + CT2_MBIST_CTL_REG));
+ writel(0, rb + CT2_MBIST_CTL_REG);
}
static void
@@ -801,22 +800,22 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
/*
* release soft reset on s_clk & l_clk
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
- writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
- (rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + CT2_APP_PLL_SCLK_CTL_REG);
/*
* release soft reset on s_clk & l_clk
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
- writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
- (rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + CT2_APP_PLL_LCLK_CTL_REG);
/* put port0, port1 MAC & AHB in reset */
- writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
- (rb + CT2_CSI_MAC_CONTROL_REG(0)));
- writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
- (rb + CT2_CSI_MAC_CONTROL_REG(1)));
+ writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
+ rb + CT2_CSI_MAC_CONTROL_REG(0));
+ writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
+ rb + CT2_CSI_MAC_CONTROL_REG(1));
}
#define CT2_NFC_MAX_DELAY 1000
@@ -861,8 +860,8 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
- if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
- (nfc_ver >= CT2_NFC_VER_VALID)) {
+ if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
+ nfc_ver >= CT2_NFC_VER_VALID) {
if (bfa_ioc_ct2_nfc_halted(rb))
bfa_ioc_ct2_nfc_resume(rb);
writel(__RESET_AND_START_SCLK_LCLK_PLLS,
@@ -899,19 +898,19 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
bfa_ioc_ct2_lclk_init(rb);
/* release soft reset on s_clk & l_clk */
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
rb + CT2_APP_PLL_SCLK_CTL_REG);
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
rb + CT2_APP_PLL_LCLK_CTL_REG);
}
/* Announce flash device presence, if flash was corrupted. */
if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
- r32 = readl((rb + PSS_GPIO_OUT_REG));
+ r32 = readl(rb + PSS_GPIO_OUT_REG);
writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
- r32 = readl((rb + PSS_GPIO_OE_REG));
+ r32 = readl(rb + PSS_GPIO_OE_REG);
writel(r32 | 1, rb + PSS_GPIO_OE_REG);
}
@@ -919,27 +918,27 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
* Mask the interrupts and clear any
* pending interrupts left by BIOS/EFI
*/
- writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
- writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+ writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
+ writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);
/* For first time initialization, no need to clear interrupts */
r32 = readl(rb + HOST_SEM5_REG);
if (r32 & 0x1) {
- r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
if (r32 == 1) {
- writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+ readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
}
- r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
if (r32 == 1) {
- writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
}
}
bfa_ioc_ct2_mem_init(rb);
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
return BFA_STATUS_OK;
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index c07d5b9372f4..9c5bb24e8abb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -66,8 +66,9 @@ cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
cmdq->offset = 0;
cmdq->bytes_to_copy = 0;
while (!list_empty(&cmdq->pending_q)) {
- bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
- bfa_q_qe_init(&cmdq_ent->qe);
+ cmdq_ent = list_first_entry(&cmdq->pending_q,
+ struct bfa_msgq_cmd_entry, qe);
+ list_del(&cmdq_ent->qe);
call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
}
}
@@ -242,8 +243,8 @@ bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
/* Walk through pending list to see if the command can be posted */
while (!list_empty(&cmdq->pending_q)) {
- cmd =
- (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
+ cmd = list_first_entry(&cmdq->pending_q,
+ struct bfa_msgq_cmd_entry, qe);
if (ntohs(cmd->msg_hdr->num_entries) <=
BFA_MSGQ_FREE_CNT(cmdq)) {
list_del(&cmd->qe);
@@ -615,7 +616,6 @@ bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
bfa_msgq_rspq_attach(&msgq->rspq, msgq);
bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
- bfa_q_qe_init(&msgq->ioc_notify);
bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 2bcde4042268..81e59ea8b4f2 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -21,8 +21,6 @@
#include "bfa_defs.h"
-#pragma pack(1)
-
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
@@ -36,10 +34,10 @@ struct bfi_mhdr {
struct {
u8 qid;
u8 fn_lpu; /*!< msg destination */
- } h2i;
+ } __packed h2i;
u16 i2htok; /*!< token in msgs to host */
- } mtag;
-};
+ } __packed mtag;
+} __packed;
#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
@@ -75,14 +73,14 @@ union bfi_addr_u {
struct {
u32 addr_lo;
u32 addr_hi;
- } a32;
-};
+ } __packed a32;
+} __packed;
/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
-};
+} __packed;
/*
* Large Message structure - 128 Bytes size Msgs
@@ -96,7 +94,7 @@ struct bfi_alen {
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
-};
+} __packed;
/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
@@ -184,19 +182,19 @@ enum bfi_ioc_i2h_msgs {
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
-};
+} __packed;
struct bfi_ioc_attr {
u64 mfg_pwwn; /*!< Mfg port wwn */
u64 mfg_nwwn; /*!< Mfg node wwn */
- mac_t mfg_mac; /*!< Mfg mac */
+ u8 mfg_mac[ETH_ALEN]; /*!< Mfg mac */
u8 port_mode; /* enum bfi_port_mode */
u8 rsvd_a;
u64 pwwn;
u64 nwwn;
- mac_t mac; /*!< PBC or Mfg mac */
+ u8 mac[ETH_ALEN]; /*!< PBC or Mfg mac */
u16 rsvd_b;
- mac_t fcoe_mac;
+ u8 fcoe_mac[ETH_ALEN];
u16 rsvd_c;
char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
u8 pcie_gen;
@@ -211,14 +209,14 @@ struct bfi_ioc_attr {
char optrom_version[BFA_VERSION_LEN];
struct bfa_mfg_vpd vpd;
u32 card_type; /*!< card type */
-};
+} __packed;
/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
-};
+} __packed;
/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
@@ -256,7 +254,7 @@ struct bfi_ioc_fwver {
u8 build;
u8 rsvd[2];
#endif
-};
+} __packed;
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
@@ -269,7 +267,7 @@ struct bfi_ioc_image_hdr {
u32 rsvd_b[2];
struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
-};
+} __packed;
enum bfi_ioc_img_ver_cmp {
BFI_IOC_IMG_VER_INCOMP,
@@ -301,7 +299,7 @@ enum bfi_port_mode {
struct bfi_ioc_hbeat {
struct bfi_mhdr mh; /*!< common msg header */
u32 hb_count; /*!< current heart beat count */
-};
+} __packed;
/* IOC hardware/firmware state */
enum bfi_ioc_state {
@@ -317,8 +315,6 @@ enum bfi_ioc_state {
BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
};
-#define BFI_IOC_ENDIAN_SIG 0x12345678
-
enum {
BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
@@ -337,12 +333,6 @@ enum {
BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_SETP(__prop, __val) \
((__val) << BFI_ADAPTER_ ## __prop ## _SH)
-#define BFI_ADAPTER_IS_PROTO(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_PROTO)
-#define BFI_ADAPTER_IS_TTV(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_TTV)
-#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_UNSUPP)
#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
@@ -353,7 +343,7 @@ struct bfi_ioc_ctrl_req {
u16 clscode;
u16 rsvd;
u32 tv_sec;
-};
+} __packed;
/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
@@ -362,7 +352,7 @@ struct bfi_ioc_ctrl_reply {
u8 port_mode; /*!< enum bfa_mode */
u8 cap_bm; /*!< capability bit mask */
u8 rsvd;
-};
+} __packed;
#define BFI_IOC_MSGSZ 8
/* H2I Messages */
@@ -372,14 +362,14 @@ union bfi_ioc_h2i_msg_u {
struct bfi_ioc_ctrl_req disable_req;
struct bfi_ioc_getattr_req getattr_req;
u32 mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
/*----------------------------------------------------------------------
* MSGQ
@@ -408,7 +398,7 @@ struct bfi_msgq_mhdr {
u16 num_entries;
u8 enet_id;
u8 rsvd[1];
-};
+} __packed;
#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
(_mh).msg_class = (_mc); \
@@ -430,21 +420,21 @@ struct bfi_msgq {
union bfi_addr_u addr;
u16 q_depth; /* Total num of entries in the queue */
u8 rsvd[2];
-};
+} __packed;
/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
struct bfi_msgq_cfg_req {
struct bfi_mhdr mh;
struct bfi_msgq cmdq;
struct bfi_msgq rspq;
-};
+} __packed;
/* BFI_ENET_MSGQ_CFG_RSP */
struct bfi_msgq_cfg_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/* BFI_MSGQ_H2I_DOORBELL */
struct bfi_msgq_h2i_db {
@@ -452,8 +442,8 @@ struct bfi_msgq_h2i_db {
union {
u16 cmdq_pi;
u16 rspq_ci;
- } idx;
-};
+ } __packed idx;
+} __packed;
/* BFI_MSGQ_I2H_DOORBELL */
struct bfi_msgq_i2h_db {
@@ -461,8 +451,8 @@ struct bfi_msgq_i2h_db {
union {
u16 rspq_pi;
u16 cmdq_ci;
- } idx;
-};
+ } __packed idx;
+} __packed;
#define BFI_CMD_COPY_SZ 28
@@ -470,14 +460,14 @@ struct bfi_msgq_i2h_db {
struct bfi_msgq_h2i_cmdq_copy_rsp {
struct bfi_mhdr mh;
u8 data[BFI_CMD_COPY_SZ];
-};
+} __packed;
/* BFI_MSGQ_I2H_CMD_COPY_REQ */
struct bfi_msgq_i2h_cmdq_copy_req {
struct bfi_mhdr mh;
u16 offset;
u16 len;
-};
+} __packed;
/*
* FLASH module specific
@@ -505,7 +495,7 @@ enum bfi_flash_i2h_msgs {
struct bfi_flash_query_req {
struct bfi_mhdr mh; /* Common msg header */
struct bfi_alen alen;
-};
+} __packed;
/*
* Flash write request
@@ -519,7 +509,7 @@ struct bfi_flash_write_req {
u8 rsv[2];
u32 offset;
u32 length;
-};
+} __packed;
/*
* Flash read request
@@ -532,7 +522,7 @@ struct bfi_flash_read_req {
u32 offset;
u32 length;
struct bfi_alen alen;
-};
+} __packed;
/*
* Flash query response
@@ -540,7 +530,7 @@ struct bfi_flash_read_req {
struct bfi_flash_query_rsp {
struct bfi_mhdr mh; /* Common msg header */
u32 status;
-};
+} __packed;
/*
* Flash read response
@@ -552,7 +542,7 @@ struct bfi_flash_read_rsp {
u8 rsv[3];
u32 status;
u32 length;
-};
+} __packed;
/*
* Flash write response
@@ -564,8 +554,6 @@ struct bfi_flash_write_rsp {
u8 rsv[3];
u32 status;
u32 length;
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_H__ */
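Note that __packed is applied to every inline-defined aggregate above (h2i, mtag, a32, idx) as well as to the enclosing type: the attribute on an outer struct packs the placement of its members but not the interior layout of a nested struct or union. Illustration with a made-up type:

#include <linux/types.h>

struct outer {
	u8 flag;
	struct {
		u8  qid;
		u16 token;	/* would sit at inner offset 2 without __packed */
	} __packed tag;
} __packed;			/* sizeof == 4 here; 5 if the inner attribute were dropped */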
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index bd605bee72ee..fad651101c48 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -22,8 +22,6 @@
#include "bfi.h"
#include "bfa_defs_cna.h"
-#pragma pack(1)
-
enum bfi_port_h2i {
BFI_PORT_H2I_ENABLE_REQ = (1),
BFI_PORT_H2I_DISABLE_REQ = (2),
@@ -43,7 +41,7 @@ struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
-};
+} __packed;
/* Generic RSP type */
struct bfi_port_generic_rsp {
@@ -51,13 +49,13 @@ struct bfi_port_generic_rsp {
u8 status; /*!< port enable status */
u8 rsvd[3];
u32 msgtag; /*!< msgtag for reply */
-};
+} __packed;
/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
-};
+} __packed;
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
@@ -65,7 +63,7 @@ union bfi_port_h2i_msg_u {
struct bfi_port_generic_req disable_req;
struct bfi_port_get_stats_req getstats_req;
struct bfi_port_generic_req clearstats_req;
-};
+} __packed;
union bfi_port_i2h_msg_u {
struct bfi_mhdr mh;
@@ -73,7 +71,7 @@ union bfi_port_i2h_msg_u {
struct bfi_port_generic_rsp disable_rsp;
struct bfi_port_generic_rsp getstats_rsp;
struct bfi_port_generic_rsp clearstats_rsp;
-};
+} __packed;
/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
enum bfi_cee_h2i_msgs {
@@ -97,7 +95,7 @@ enum bfi_cee_i2h_msgs {
*/
struct bfi_lldp_reset_stats {
struct bfi_mhdr mh;
-};
+} __packed;
/*
* @brief H2I command structure for resetting the stats.
@@ -105,7 +103,7 @@ struct bfi_lldp_reset_stats {
*/
struct bfi_cee_reset_stats {
struct bfi_mhdr mh;
-};
+} __packed;
/*
* @brief get configuration command from host
@@ -114,7 +112,7 @@ struct bfi_cee_reset_stats {
struct bfi_cee_get_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
-};
+} __packed;
/*
* @brief reply message from firmware
@@ -124,7 +122,7 @@ struct bfi_cee_get_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/*
* @brief get configuration command from host
@@ -133,7 +131,7 @@ struct bfi_cee_get_rsp {
struct bfi_cee_stats_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
-};
+} __packed;
/*
* @brief reply message from firmware
@@ -143,22 +141,20 @@ struct bfi_cee_stats_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/* @brief mailbox command structures from host to firmware */
union bfi_cee_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_req get_req;
struct bfi_cee_stats_req stats_req;
-};
+} __packed;
/* @brief mailbox message structures from firmware to host */
union bfi_cee_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_rsp get_rsp;
struct bfi_cee_stats_rsp stats_rsp;
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index bccca3bbadb8..d7be7ea8c7f5 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -36,8 +36,6 @@
#include "bfa_defs.h"
#include "bfi.h"
-#pragma pack(1)
-
#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
#define BFI_ENET_TXQ_PRIO_MAX 8
@@ -59,8 +57,8 @@ union bfi_addr_be_u {
struct {
u32 addr_hi; /* Most Significant 32-bits */
u32 addr_lo; /* Least Significant 32-Bits */
- } a32;
-};
+ } __packed a32;
+} __packed;
/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
@@ -70,13 +68,13 @@ union bfi_addr_be_u {
#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BFI_ENET_TXQ_WI_CF_FCOE_CRC BIT(8)
+#define BFI_ENET_TXQ_WI_CF_IPID_MODE BIT(5)
+#define BFI_ENET_TXQ_WI_CF_INS_PRIO BIT(4)
+#define BFI_ENET_TXQ_WI_CF_INS_VLAN BIT(3)
+#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM BIT(2)
+#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM BIT(1)
+#define BFI_ENET_TXQ_WI_CF_IP_CKSUM BIT(0)
struct bfi_enet_txq_wi_base {
u8 reserved;
@@ -88,28 +86,28 @@ struct bfi_enet_txq_wi_base {
u16 vlan_tag;
u16 lso_mss; /* Only 14 LSB are valid */
u32 frame_length; /* Only 24 LSB are valid */
-};
+} __packed;
struct bfi_enet_txq_wi_ext {
u16 reserved;
u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
u32 reserved2[3];
-};
+} __packed;
struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
u16 reserved;
u16 length; /* Only 14 LSB are valid */
union bfi_addr_be_u addr;
-};
+} __packed;
/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
struct bfi_enet_txq_wi_ext ext;
- } wi;
+ } __packed wi;
struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
-};
+} __packed;
#define wi_hdr wi.base
#define wi_ext_hdr wi.ext
@@ -120,36 +118,36 @@ struct bfi_enet_txq_entry {
/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
-};
+} __packed;
/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
-#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
-#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
-#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
-#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
+#define BFI_ENET_CQ_EF_MAC_ERROR BIT(0)
+#define BFI_ENET_CQ_EF_FCS_ERROR BIT(1)
+#define BFI_ENET_CQ_EF_TOO_LONG BIT(2)
+#define BFI_ENET_CQ_EF_FC_CRC_OK BIT(3)
-#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
-#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
+#define BFI_ENET_CQ_EF_RSVD1 BIT(4)
+#define BFI_ENET_CQ_EF_L4_CKSUM_OK BIT(5)
+#define BFI_ENET_CQ_EF_L3_CKSUM_OK BIT(6)
+#define BFI_ENET_CQ_EF_HDS_HEADER BIT(7)
-#define BFI_ENET_CQ_EF_UDP (1 << 8)
-#define BFI_ENET_CQ_EF_TCP (1 << 9)
-#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
-#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
+#define BFI_ENET_CQ_EF_UDP BIT(8)
+#define BFI_ENET_CQ_EF_TCP BIT(9)
+#define BFI_ENET_CQ_EF_IP_OPTIONS BIT(10)
+#define BFI_ENET_CQ_EF_IPV6 BIT(11)
-#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
-#define BFI_ENET_CQ_EF_VLAN (1 << 13)
-#define BFI_ENET_CQ_EF_RSS (1 << 14)
-#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
+#define BFI_ENET_CQ_EF_IPV4 BIT(12)
+#define BFI_ENET_CQ_EF_VLAN BIT(13)
+#define BFI_ENET_CQ_EF_RSS BIT(14)
+#define BFI_ENET_CQ_EF_RSVD2 BIT(15)
-#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
-#define BFI_ENET_CQ_EF_MCAST (1 << 17)
-#define BFI_ENET_CQ_EF_BCAST (1 << 18)
-#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
+#define BFI_ENET_CQ_EF_MCAST_MATCH BIT(16)
+#define BFI_ENET_CQ_EF_MCAST BIT(17)
+#define BFI_ENET_CQ_EF_BCAST BIT(18)
+#define BFI_ENET_CQ_EF_REMOTE BIT(19)
-#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
+#define BFI_ENET_CQ_EF_LOCAL BIT(20)
/* CQ Entry Structure */
struct bfi_enet_cq_entry {
@@ -161,7 +159,7 @@ struct bfi_enet_cq_entry {
u8 reserved1;
u8 reserved2;
u8 rxq_id;
-};
+} __packed;
/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
@@ -169,23 +167,23 @@ struct bfi_enet_q {
union bfi_addr_u first_entry;
u16 pages; /* # of pages */
u16 page_sz;
-};
+} __packed;
struct bfi_enet_txq {
struct bfi_enet_q q;
u8 priority;
u8 rsvd[3];
-};
+} __packed;
struct bfi_enet_rxq {
struct bfi_enet_q q;
u16 rx_buffer_size;
u16 rsvd;
-};
+} __packed;
struct bfi_enet_cq {
struct bfi_enet_q q;
-};
+} __packed;
struct bfi_enet_ib_cfg {
u8 int_pkt_dma;
@@ -198,16 +196,16 @@ struct bfi_enet_ib_cfg {
u32 inter_pkt_timeout;
u8 inter_pkt_count;
u8 rsvd1[3];
-};
+} __packed;
struct bfi_enet_ib {
union bfi_addr_u index_addr;
union {
u16 msix_index;
u16 intx_bitmask;
- } intr;
+ } __packed intr;
u16 rsvd;
-};
+} __packed;
/* ENET command messages */
enum bfi_enet_h2i_msgs {
@@ -355,7 +353,7 @@ enum bfi_enet_err {
*/
struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
-};
+} __packed;
/* Enable/Disable Request
*
@@ -370,7 +368,7 @@ struct bfi_enet_enable_req {
struct bfi_msgq_mhdr mh;
u8 enable; /* 1 = enable; 0 = disable */
u8 rsvd[3];
-};
+} __packed;
/* Generic Response */
struct bfi_enet_rsp {
@@ -378,7 +376,7 @@ struct bfi_enet_rsp {
u8 error; /*!< if error see cmd_offset */
u8 rsvd;
u16 cmd_offset; /*!< offset to invalid parameter */
-};
+} __packed;
/* GLOBAL CONFIGURATION */
@@ -387,7 +385,7 @@ struct bfi_enet_rsp {
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
-};
+} __packed;
/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
@@ -400,7 +398,7 @@ struct bfi_enet_attr_rsp {
u32 max_cfg;
u32 max_ucmac;
u32 rit_size;
-};
+} __packed;
/* Tx Configuration
*
@@ -421,7 +419,7 @@ struct bfi_enet_tx_cfg {
u8 apply_vlan_filter;
u8 add_to_vswitch;
u8 rsvd1[1];
-};
+} __packed;
struct bfi_enet_tx_cfg_req {
struct bfi_msgq_mhdr mh;
@@ -431,7 +429,7 @@ struct bfi_enet_tx_cfg_req {
struct {
struct bfi_enet_txq q;
struct bfi_enet_ib ib;
- } q_cfg[BFI_ENET_TXQ_PRIO_MAX];
+ } __packed q_cfg[BFI_ENET_TXQ_PRIO_MAX];
struct bfi_enet_ib_cfg ib_cfg;
@@ -448,7 +446,7 @@ struct bfi_enet_tx_cfg_rsp {
u32 i_dbell; /* PCI base address offset */
u8 hw_qid; /* For debugging */
u8 rsvd[3];
- } q_handles[BFI_ENET_TXQ_PRIO_MAX];
+ } __packed q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
/* Rx Configuration
@@ -481,13 +479,13 @@ struct bfi_enet_rx_cfg {
u8 force_offset;
u8 type;
u8 rsvd1;
- } hds;
+ } __packed hds;
u8 multi_buffer;
u8 strip_vlan;
u8 drop_untagged;
u8 rsvd2;
-};
+} __packed;
/*
* Multicast frames are received on the ql of q-set index zero.
@@ -504,12 +502,12 @@ struct bfi_enet_rx_cfg_req {
struct bfi_enet_rxq qs; /* small/header buffers */
struct bfi_enet_cq cq;
struct bfi_enet_ib ib;
- } q_cfg[BFI_ENET_RX_QSET_MAX];
+ } __packed q_cfg[BFI_ENET_RX_QSET_MAX];
struct bfi_enet_ib_cfg ib_cfg;
struct bfi_enet_rx_cfg rx_cfg;
-};
+} __packed;
struct bfi_enet_rx_cfg_rsp {
struct bfi_msgq_mhdr mh;
@@ -524,8 +522,8 @@ struct bfi_enet_rx_cfg_rsp {
u8 hw_sqid; /* For debugging */
u8 hw_cqid; /* For debugging */
u8 rsvd;
- } q_handles[BFI_ENET_RX_QSET_MAX];
-};
+ } __packed q_handles[BFI_ENET_RX_QSET_MAX];
+} __packed;
/* RIT
*
@@ -537,7 +535,7 @@ struct bfi_enet_rit_req {
u16 size; /* number of table-entries used */
u8 rsvd[2];
u8 table[BFI_ENET_RSS_RIT_MAX];
-};
+} __packed;
/* RSS
*
@@ -556,12 +554,12 @@ struct bfi_enet_rss_cfg {
u8 mask;
u8 rsvd[2];
u32 key[BFI_ENET_RSS_KEY_LEN];
-};
+} __packed;
struct bfi_enet_rss_cfg_req {
struct bfi_msgq_mhdr mh;
struct bfi_enet_rss_cfg cfg;
-};
+} __packed;
/* MAC Unicast
*
@@ -573,16 +571,16 @@ struct bfi_enet_rss_cfg_req {
*/
struct bfi_enet_ucast_req {
struct bfi_msgq_mhdr mh;
- mac_t mac_addr;
+ u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
-};
+} __packed;
/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
- mac_t mac_addr;
-};
+ u8 mac_addr[ETH_ALEN];
+} __packed;
/* MAC Multicast
*
@@ -591,9 +589,9 @@ struct bfi_enet_mac_n_vlan_req {
*/
struct bfi_enet_mcast_add_req {
struct bfi_msgq_mhdr mh;
- mac_t mac_addr;
+ u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
-};
+} __packed;
/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
@@ -605,7 +603,7 @@ struct bfi_enet_mcast_add_rsp {
u16 cmd_offset;
u16 handle;
u8 rsvd1[2];
-};
+} __packed;
/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
@@ -614,7 +612,7 @@ struct bfi_enet_mcast_del_req {
struct bfi_msgq_mhdr mh;
u16 handle;
u8 rsvd[2];
-};
+} __packed;
/* VLAN
*
@@ -626,7 +624,7 @@ struct bfi_enet_rx_vlan_req {
u8 block_idx;
u8 rsvd[3];
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
-};
+} __packed;
/* PAUSE
*
@@ -638,7 +636,7 @@ struct bfi_enet_set_pause_req {
u8 rsvd[2];
u8 tx_pause; /* 1 = enable; 0 = disable */
u8 rx_pause; /* 1 = enable; 0 = disable */
-};
+} __packed;
/* DIAGNOSTICS
*
@@ -650,7 +648,7 @@ struct bfi_enet_diag_lb_req {
u8 rsvd[2];
u8 mode; /* cable or Serdes */
u8 enable; /* 1 = enable; 0 = disable */
-};
+} __packed;
/* enum for Loopback opmodes */
enum {
@@ -671,14 +669,14 @@ struct bfi_enet_stats_req {
u32 rx_enet_mask;
u32 tx_enet_mask;
union bfi_addr_u host_buffer;
-};
+} __packed;
/* defines for "stats_mask" above. */
-#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
-#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
-#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
-#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
-#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
+#define BFI_ENET_STATS_MAC BIT(0) /* !< MAC Statistics */
+#define BFI_ENET_STATS_BPC BIT(1) /* !< Pause Stats from BPC */
+#define BFI_ENET_STATS_RAD BIT(2) /* !< Rx Admission Statistics */
+#define BFI_ENET_STATS_RX_FC BIT(3) /* !< Rx FC Stats from RxA */
+#define BFI_ENET_STATS_TX_FC BIT(4) /* !< Tx FC Stats from TxA */
#define BFI_ENET_STATS_ALL 0x1f
@@ -699,7 +697,7 @@ struct bfi_enet_stats_txf {
u64 errors;
u64 filter_vlan; /* frames filtered due to VLAN */
u64 filter_mac_sa; /* frames filtered due to SA check */
-};
+} __packed;
/* RxF Frame Statistics */
struct bfi_enet_stats_rxf {
@@ -715,7 +713,7 @@ struct bfi_enet_stats_rxf {
u64 bcast;
u64 bcast_vlan;
u64 frame_drops;
-};
+} __packed;
/* FC Tx Frame Statistics */
struct bfi_enet_stats_fc_tx {
@@ -734,7 +732,7 @@ struct bfi_enet_stats_fc_tx {
u64 txf_parity_errors;
u64 txf_timeout;
u64 txf_fid_parity_errors;
-};
+} __packed;
/* FC Rx Frame Statistics */
struct bfi_enet_stats_fc_rx {
@@ -749,7 +747,7 @@ struct bfi_enet_stats_fc_rx {
u64 rxf_bcast_octets;
u64 rxf_bcast;
u64 rxf_bcast_vlan;
-};
+} __packed;
/* RAD Frame Statistics */
struct bfi_enet_stats_rad {
@@ -770,7 +768,7 @@ struct bfi_enet_stats_rad {
u64 rx_bcast_vlan;
u64 rx_drops;
-};
+} __packed;
/* BPC Tx Registers */
struct bfi_enet_stats_bpc {
@@ -785,7 +783,7 @@ struct bfi_enet_stats_bpc {
u64 rx_zero_pause[8]; /*!< Pause cancellation */
/*!<Pause initiation rather than retention */
u64 rx_first_pause[8];
-};
+} __packed;
/* MAC Rx Statistics */
struct bfi_enet_stats_mac {
@@ -838,7 +836,7 @@ struct bfi_enet_stats_mac {
u64 tx_oversize;
u64 tx_undersize;
u64 tx_fragments;
-};
+} __packed;
/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
@@ -852,8 +850,6 @@ struct bfi_enet_stats {
struct bfi_enet_stats_fc_tx fc_tx_stats;
struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_ENET_H__ */
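The mac_t members above become plain u8 [ETH_ALEN] arrays (ETH_ALEN is 6, from <linux/if_ether.h>), which lets the driver use the generic helpers in <linux/etherdevice.h>. A small hedged sketch; sample and fill_mac are illustrative:

#include <linux/etherdevice.h>	/* ether_addr_copy() */

static const u8 sample[ETH_ALEN] = { 0x00, 0x05, 0x1e, 0x00, 0x00, 0x01 };

static void fill_mac(u8 *dst)
{
	/* documented contract: both pointers must be 16-bit aligned */
	ether_addr_copy(dst, sample);
}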
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 8ba72b1f36d9..006dcad9a260 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -28,36 +28,8 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Macros and constants */
-#define BNA_IOC_TIMER_FREQ 200
-
-/* Log string size */
-#define BNA_MESSAGE_SIZE 256
-
#define bna_is_small_rxq(_id) ((_id) & 0x1)
-#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
- (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
-
-#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
-
-#define BNA_TO_POWER_OF_2(x) \
-do { \
- int _shift = 0; \
- while ((x) && (x) != 1) { \
- (x) >>= 1; \
- _shift++; \
- } \
- (x) <<= _shift; \
-} while (0)
-
-#define BNA_TO_POWER_OF_2_HIGH(x) \
-do { \
- int n = 1; \
- while (n < (x)) \
- n <<= 1; \
- (x) = n; \
-} while (0)
-
/*
* input : _addr-> os dma addr in host endian format,
* output : _bna_dma_addr-> pointer to hw dma addr
@@ -80,62 +52,8 @@ do { \
| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
-#define containing_rec(addr, type, field) \
- ((type *)((unsigned char *)(addr) - \
- (unsigned char *)(&((type *)0)->field)))
-
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
-/* TxQ element is 64 bytes */
-#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
-#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
-
-#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
- (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
-}
-
-/* RxQ element is 8 bytes */
-#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
-#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
-
-#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
-}
-
-/* CQ element is 16 bytes */
-#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
-#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
-
-#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- \
- page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
-}
-
-#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
- (&((_cast *)(_q_base))[(_qe_idx)])
-
-#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
-
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
@@ -147,31 +65,10 @@ do { \
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
((_q_depth) - 1))
-
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
(_q_depth - 1))
-#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
-
-#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
-
-#define BNA_Q_PI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.producer_index = \
- (((_q_ptr)->q.producer_index + (_num)) & \
- ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_CI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.consumer_index = \
- (((_q_ptr)->q.consumer_index + (_num)) \
- & ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_FREE_COUNT(_q_ptr) \
- (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-
-#define BNA_Q_IN_USE_COUNT(_q_ptr) \
- (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
-
#define BNA_LARGE_PKT_SIZE 1000
#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
@@ -222,21 +119,6 @@ do { \
} \
} while (0)
-#define call_rxf_pause_cbfn(rxf) \
-do { \
- if ((rxf)->oper_state_cbfn) { \
- void (*cbfn)(struct bnad *, struct bna_rx *); \
- struct bnad *cbarg; \
- cbfn = (rxf)->oper_state_cbfn; \
- cbarg = (rxf)->oper_state_cbarg; \
- (rxf)->oper_state_cbfn = NULL; \
- (rxf)->oper_state_cbarg = NULL; \
- cbfn(cbarg, rxf->rx); \
- } \
-} while (0)
-
-#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
-
#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
@@ -326,28 +208,24 @@ do { \
#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
#define bna_tx_from_rid(_bna, _rid, _tx) \
-do { \
- struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
- struct bna_tx *__tx; \
- struct list_head *qe; \
- _tx = NULL; \
- list_for_each(qe, &__tx_mod->tx_active_q) { \
- __tx = (struct bna_tx *)qe; \
- if (__tx->rid == (_rid)) { \
- (_tx) = __tx; \
- break; \
- } \
- } \
+do { \
+ struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
+ struct bna_tx *__tx; \
+ _tx = NULL; \
+ list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) { \
+ if (__tx->rid == (_rid)) { \
+ (_tx) = __tx; \
+ break; \
+ } \
+ } \
} while (0)
#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
struct bna_rx *__rx; \
- struct list_head *qe; \
_rx = NULL; \
- list_for_each(qe, &__rx_mod->rx_active_q) { \
- __rx = (struct bna_rx *)qe; \
+ list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) { \
if (__rx->rid == (_rid)) { \
(_rx) = __rx; \
break; \
@@ -365,17 +243,14 @@ do { \
/* Inline functions */
-static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
- list_for_each(qe, q) {
- if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
- mac = (struct bna_mac *)qe;
- break;
- }
- }
- return mac;
+ struct bna_mac *mac;
+
+ list_for_each_entry(mac, q, qe)
+ if (ether_addr_equal(mac->addr, addr))
+ return mac;
+ return NULL;
}
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
@@ -401,7 +276,6 @@ void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
-void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -488,31 +362,19 @@ void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
-enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac);
+enum bna_cb_status bna_rx_ucast_listset(struct bna_rx *rx, int count,
+ const u8 *uclist);
+enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, const u8 *mcmac,
+ void (*cbfn)(struct bnad *,
+ struct bna_rx *));
+enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count,
+ const u8 *mcmac);
void
-bna_rx_mcast_delall(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_mcast_delall(struct bna_rx *rx);
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+ enum bna_rxmode bitmask);
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
@@ -532,11 +394,10 @@ void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *));
+ struct bna_pause_config *pause_config);
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
-void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac);
/* IOCETH */
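bna_mac_find() above shows the companion lookup idiom: typed list iteration plus ether_addr_equal() in place of the driver-private BNA_MAC_IS_EQUAL() memcmp wrapper. A generic restatement with illustrative names; like ether_addr_copy(), ether_addr_equal() assumes 16-bit alignment of both addresses:

#include <linux/etherdevice.h>
#include <linux/list.h>

struct node {
	struct list_head qe;
	u8 addr[ETH_ALEN];
};

static struct node *find_addr(struct list_head *head, const u8 *addr)
{
	struct node *n;

	list_for_each_entry(n, head, qe)
		if (ether_addr_equal(n->addr, addr))
			return n;
	return NULL;
}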
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index deb8da6ab9cc..4e5c3874a50f 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -207,7 +207,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
- if (rx_enet_mask & ((u32)(1 << i))) {
+ if (rx_enet_mask & BIT(i)) {
int k;
count = sizeof(struct bfi_enet_stats_rxf) /
sizeof(u64);
@@ -222,7 +222,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
- if (tx_enet_mask & ((u32)(1 << i))) {
+ if (tx_enet_mask & BIT(i)) {
int k;
count = sizeof(struct bfi_enet_stats_txf) /
sizeof(u64);
@@ -884,16 +884,6 @@ do { \
} \
} while (0)
-#define call_enet_pause_cbfn(enet) \
-do { \
- if ((enet)->pause_cbfn) { \
- void (*cbfn)(struct bnad *); \
- cbfn = (enet)->pause_cbfn; \
- (enet)->pause_cbfn = NULL; \
- cbfn((enet)->bna->bnad); \
- } \
-} while (0)
-
#define call_enet_mtu_cbfn(enet) \
do { \
if ((enet)->mtu_cbfn) { \
@@ -925,7 +915,6 @@ bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
- call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
call_enet_stop_cbfn(enet);
}
@@ -947,7 +936,6 @@ bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
break;
case ENET_E_PAUSE_CFG:
- call_enet_pause_cbfn(enet);
break;
case ENET_E_MTU_CFG:
@@ -1039,7 +1027,6 @@ bna_enet_sm_started_entry(struct bna_enet *enet)
* NOTE: Do not call bna_enet_chld_start() here, since it will be
* inadvertently called during cfg_wait->started transition as well
*/
- call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
}
@@ -1211,8 +1198,6 @@ bna_enet_init(struct bna_enet *enet, struct bna *bna)
enet->stop_cbfn = NULL;
enet->stop_cbarg = NULL;
- enet->pause_cbfn = NULL;
-
enet->mtu_cbfn = NULL;
bfa_fsm_set_state(enet, bna_enet_sm_stopped);
@@ -1308,13 +1293,10 @@ bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void
bna_enet_pause_config(struct bna_enet *enet,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *))
+ struct bna_pause_config *pause_config)
{
enet->pause_config = *pause_config;
- enet->pause_cbfn = cbfn;
-
bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
@@ -1330,9 +1312,9 @@ bna_enet_mtu_set(struct bna_enet *enet, int mtu,
}
void
-bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
+bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
- *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
+ bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
/* IOCETH */
@@ -1810,17 +1792,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&ucam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
- }
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&ucam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
- }
ucam_mod->bna = bna;
}
@@ -1828,17 +1806,6 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &ucam_mod->free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &ucam_mod->del_q)
- i++;
-
ucam_mod->bna = NULL;
}
@@ -1852,27 +1819,21 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
- }
mcam_mod->mchandle = (struct bna_mcam_handle *)
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_handle_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
- bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mchandle[i].qe,
- &mcam_mod->free_handle_q);
- }
+ &mcam_mod->free_handle_q);
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&mcam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
- }
mcam_mod->bna = bna;
}
@@ -1880,18 +1841,6 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &mcam_mod->free_q) i++;
-
- i = 0;
- list_for_each(qe, &mcam_mod->del_q) i++;
-
- i = 0;
- list_for_each(qe, &mcam_mod->free_handle_q) i++;
-
mcam_mod->bna = NULL;
}
@@ -2108,32 +2057,26 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
- struct list_head *qe;
-
- if (list_empty(head))
- return NULL;
+ struct bna_mac *mac;
- bfa_q_deq(head, &qe);
- return (struct bna_mac *)qe;
-}
+ mac = list_first_entry_or_null(head, struct bna_mac, qe);
+ if (mac)
+ list_del(&mac->qe);
-void
-bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, tail);
+ return mac;
}
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
- struct list_head *qe;
-
- if (list_empty(&mcam_mod->free_handle_q))
- return NULL;
+ struct bna_mcam_handle *handle;
- bfa_q_deq(&mcam_mod->free_handle_q, &qe);
+ handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
+ struct bna_mcam_handle, qe);
+ if (handle)
+ list_del(&handle->qe);
- return (struct bna_mcam_handle *)qe;
+ return handle;
}
void
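bna_cam_mod_mac_get() and bna_mcam_mod_handle_get() above adopt list_first_entry_or_null(), which folds the empty-list test and the container lookup into one call (it returns NULL for an empty list). The shape, with illustrative names:

#include <linux/list.h>

struct item { struct list_head qe; };

static struct item *get_and_unlink(struct list_head *head)
{
	struct item *it = list_first_entry_or_null(head, struct item, qe);

	if (it)
		list_del(&it->qe);	/* detach before handing it out */
	return it;
}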
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 174af0e9d056..52b45c9935aa 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -213,7 +213,7 @@ do { \
* 15 bits (32K) should be large enough to accumulate, anyways, and the max.
* acked events to h/w can be (32K + max poll weight) (currently 64).
*/
-#define BNA_IB_MAX_ACK_EVENTS (1 << 15)
+#define BNA_IB_MAX_ACK_EVENTS BIT(15)
/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
@@ -282,13 +282,13 @@ do { \
#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BNA_TXQ_WI_CF_FCOE_CRC BIT(8)
+#define BNA_TXQ_WI_CF_IPID_MODE BIT(5)
+#define BNA_TXQ_WI_CF_INS_PRIO BIT(4)
+#define BNA_TXQ_WI_CF_INS_VLAN BIT(3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM BIT(2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM BIT(1)
+#define BNA_TXQ_WI_CF_IP_CKSUM BIT(0)
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
@@ -297,36 +297,36 @@ do { \
* Completion Q defines
*/
/* CQ Entry Flags */
-#define BNA_CQ_EF_MAC_ERROR (1 << 0)
-#define BNA_CQ_EF_FCS_ERROR (1 << 1)
-#define BNA_CQ_EF_TOO_LONG (1 << 2)
-#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
-
-#define BNA_CQ_EF_RSVD1 (1 << 4)
-#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BNA_CQ_EF_HDS_HEADER (1 << 7)
-
-#define BNA_CQ_EF_UDP (1 << 8)
-#define BNA_CQ_EF_TCP (1 << 9)
-#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
-#define BNA_CQ_EF_IPV6 (1 << 11)
-
-#define BNA_CQ_EF_IPV4 (1 << 12)
-#define BNA_CQ_EF_VLAN (1 << 13)
-#define BNA_CQ_EF_RSS (1 << 14)
-#define BNA_CQ_EF_RSVD2 (1 << 15)
-
-#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
-#define BNA_CQ_EF_MCAST (1 << 17)
-#define BNA_CQ_EF_BCAST (1 << 18)
-#define BNA_CQ_EF_REMOTE (1 << 19)
-
-#define BNA_CQ_EF_LOCAL (1 << 20)
+#define BNA_CQ_EF_MAC_ERROR BIT(0)
+#define BNA_CQ_EF_FCS_ERROR BIT(1)
+#define BNA_CQ_EF_TOO_LONG BIT(2)
+#define BNA_CQ_EF_FC_CRC_OK BIT(3)
+
+#define BNA_CQ_EF_RSVD1 BIT(4)
+#define BNA_CQ_EF_L4_CKSUM_OK BIT(5)
+#define BNA_CQ_EF_L3_CKSUM_OK BIT(6)
+#define BNA_CQ_EF_HDS_HEADER BIT(7)
+
+#define BNA_CQ_EF_UDP BIT(8)
+#define BNA_CQ_EF_TCP BIT(9)
+#define BNA_CQ_EF_IP_OPTIONS BIT(10)
+#define BNA_CQ_EF_IPV6 BIT(11)
+
+#define BNA_CQ_EF_IPV4 BIT(12)
+#define BNA_CQ_EF_VLAN BIT(13)
+#define BNA_CQ_EF_RSS BIT(14)
+#define BNA_CQ_EF_RSVD2 BIT(15)
+
+#define BNA_CQ_EF_MCAST_MATCH BIT(16)
+#define BNA_CQ_EF_MCAST BIT(17)
+#define BNA_CQ_EF_BCAST BIT(18)
+#define BNA_CQ_EF_REMOTE BIT(19)
+
+#define BNA_CQ_EF_LOCAL BIT(20)
/* CAT2 ASIC does not use bit 21 as per the SPEC.
* Bit 31 is set in every end of frame completion
*/
-#define BNA_CQ_EF_EOP (1 << 31)
+#define BNA_CQ_EF_EOP BIT(31)
/* Data structures */
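Worth noting for BNA_CQ_EF_EOP specifically: (1 << 31) shifts into the sign bit of a signed int, which is undefined behaviour in C, while BIT(31) expands to 1UL << 31 and stays well defined. A minimal illustration (plain C, not driver code):

        #include <stdio.h>

        #define BIT(nr) (1UL << (nr))

        int main(void)
        {
                /* 1 << 31 overflows signed int (UB); BIT(31) does not. */
                unsigned long eop = BIT(31);
                printf("0x%lx\n", eop);         /* prints 0x80000000 */
                return 0;
        }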
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 8ab3a5f62706..5d0753cc7e73 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -46,7 +46,6 @@ do { \
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
-static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
@@ -60,14 +59,10 @@ static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
- enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
- enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
enum bna_rxf_event);
@@ -82,11 +77,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_START:
- if (rxf->flags & BNA_RXF_F_PAUSED) {
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- call_rxf_start_cbfn(rxf);
- } else
- bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_STOP:
@@ -101,45 +92,6 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
call_rxf_cam_fltr_cbfn(rxf);
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- call_rxf_pause_cbfn(rxf);
- break;
-
- case RXF_E_RESUME:
- rxf->flags &= ~BNA_RXF_F_PAUSED;
- call_rxf_resume_cbfn(rxf);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
-{
- call_rxf_pause_cbfn(rxf);
-}
-
-static void
-bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_STOP:
- case RXF_E_FAIL:
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CONFIG:
- call_rxf_cam_fltr_cbfn(rxf);
- break;
-
- case RXF_E_RESUME:
- rxf->flags &= ~BNA_RXF_F_PAUSED;
- bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
- break;
-
default:
bfa_sm_fault(event);
}
@@ -166,7 +118,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
bna_rxf_cfg_reset(rxf);
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
- call_rxf_resume_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
@@ -174,12 +125,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
/* No-op */
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- call_rxf_start_cbfn(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
- break;
-
case RXF_E_FW_RESP:
if (!bna_rxf_cfg_apply(rxf)) {
/* No more pending config updates */
@@ -197,7 +142,6 @@ bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
- call_rxf_resume_cbfn(rxf);
}
static void
@@ -214,41 +158,6 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- if (!bna_rxf_fltr_clear(rxf))
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- else
- bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
-{
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- bna_rxf_cfg_reset(rxf);
- call_rxf_pause_cbfn(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_FW_RESP:
- if (!bna_rxf_fltr_clear(rxf)) {
- /* No more pending CAM entries to clear */
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- }
- break;
-
default:
bfa_sm_fault(event);
}
@@ -283,7 +192,7 @@ bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
- memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_ucast_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -299,7 +208,7 @@ bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
- memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_mcast_add_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
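The memcpy()-to-ether_addr_copy() conversions in these two request builders use the <linux/etherdevice.h> helper, which copies exactly ETH_ALEN (6) bytes and, on most architectures, is implemented with 16-bit loads that require both addresses to be 2-byte aligned. A sketch of that contract, with a hypothetical struct that is not part of this driver:

        #include <linux/etherdevice.h>

        struct example_req {
                u8 mac_addr[ETH_ALEN] __aligned(2);     /* alignment matters */
        };

        static void fill_req(struct example_req *req, const u8 *src)
        {
                /* Equivalent to memcpy(req->mac_addr, src, ETH_ALEN), but it
                 * documents intent and assumes u16-aligned operands. */
                ether_addr_copy(req->mac_addr, src);
        }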
@@ -444,22 +353,17 @@ bna_bfi_rss_enable(struct bna_rxf *rxf)
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
-bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
+bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr)
{
struct bna_mac *mac;
- struct list_head *qe;
- list_for_each(qe, &rxf->mcast_active_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ list_for_each_entry(mac, &rxf->mcast_active_q, qe)
+ if (ether_addr_equal(mac->addr, mac_addr))
return mac;
- }
- list_for_each(qe, &rxf->mcast_pending_del_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
+ if (ether_addr_equal(mac->addr, mac_addr))
return mac;
- }
return NULL;
}
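The lookup above is the canonical typed-iterator rewrite: list_for_each_entry() folds in the container_of() cast that the old list_for_each()/(struct bna_mac *) pairing did by hand, and ether_addr_equal() replaces the driver-private BNA_MAC_IS_EQUAL() (it, too, assumes 2-byte-aligned addresses). The same search shape with hypothetical names:

        #include <linux/list.h>
        #include <linux/etherdevice.h>

        struct node {
                struct list_head qe;
                u8 addr[ETH_ALEN];
        };

        static struct node *find_addr(struct list_head *head, const u8 *addr)
        {
                struct node *n;

                list_for_each_entry(n, head, qe)        /* n is already typed */
                        if (ether_addr_equal(n->addr, addr))
                                return n;
                return NULL;
        }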
@@ -468,13 +372,10 @@ static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
struct bna_mcam_handle *mchandle;
- struct list_head *qe;
- list_for_each(qe, &rxf->mcast_handle_q) {
- mchandle = (struct bna_mcam_handle *)qe;
+ list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
if (mchandle->handle == handle)
return mchandle;
- }
return NULL;
}
@@ -515,7 +416,6 @@ bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
ret = 1;
}
list_del(&mchandle->qe);
- bfa_q_qe_init(&mchandle->qe);
bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
}
mac->handle = NULL;
@@ -527,26 +427,23 @@ static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
- struct list_head *qe;
int ret;
/* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_pending_del_q,
+ struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Add multicast entries */
if (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_active_q);
bna_bfi_mcast_add_req(rxf, mac);
return 1;
}
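list_move_tail(&mac->qe, dst), used throughout this hunk, is shorthand for unlinking the node and appending it to the destination list; it also makes the old bfa_q_qe_init() re-initialisation unnecessary, since the node is immediately relinked. A sketch of the equivalence, on hypothetical lists:

        #include <linux/list.h>

        static void move_one(struct list_head *node, struct list_head *dst)
        {
                /* What list_move_tail() does internally: */
                list_del(node);                 /* unlink from current list */
                list_add_tail(node, dst);       /* append to destination */

                /* ...i.e. the two lines above == list_move_tail(node, dst); */
        }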
@@ -566,7 +463,7 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
block_idx++;
vlan_pending_bitmask >>= 1;
}
- rxf->vlan_pending_bitmask &= ~(1 << block_idx);
+ rxf->vlan_pending_bitmask &= ~BIT(block_idx);
bna_bfi_rx_vlan_filter_set(rxf, block_idx);
return 1;
}
@@ -577,27 +474,24 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
- struct list_head *qe;
struct bna_mac *mac;
int ret;
/* Throw away delete pending mcast entries */
while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_pending_del_q,
+ struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Move active mcast entries to pending_add_q */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->mcast_pending_add_q);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
if (bna_rxf_mcast_del(rxf, mac, cleanup))
return 1;
}
@@ -658,25 +552,6 @@ bna_rxf_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/* Only software reset */
-static int
-bna_rxf_fltr_clear(struct bna_rxf *rxf)
-{
- if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- return 0;
-}
-
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
@@ -693,16 +568,13 @@ bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
struct bna_rx *rx = rxf->rx;
struct bna_rxp *rxp;
- struct list_head *qe;
int offset = 0;
rxf->rit_size = rit_size;
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
+ list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxf->rit[offset] = rxp->cq.ccb->id;
offset++;
}
-
}
void
@@ -760,9 +632,6 @@ bna_rxf_init(struct bna_rxf *rxf,
INIT_LIST_HEAD(&rxf->mcast_active_q);
INIT_LIST_HEAD(&rxf->mcast_handle_q);
- if (q_config->paused)
- rxf->flags |= BNA_RXF_F_PAUSED;
-
rxf->rit = (u8 *)
res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
bna_rit_init(rxf, q_config->num_paths);
@@ -795,22 +664,21 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->ucast_active_set = 0;
while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
}
if (rxf->ucast_pending_mac) {
- bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
- rxf->ucast_pending_mac);
+ list_add_tail(&rxf->ucast_pending_mac->qe,
+ bna_ucam_mod_free_q(rxf->rx->bna));
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
rxf->rxmode_pending = 0;
@@ -823,8 +691,6 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->rss_pending = 0;
rxf->vlan_strip_pending = false;
- rxf->flags = 0;
-
rxf->rx = NULL;
}
@@ -863,8 +729,7 @@ bna_rxf_fail(struct bna_rxf *rxf)
}
enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -873,12 +738,11 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
- bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
}
- memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
+ ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
rxf->ucast_pending_set = 1;
- rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
@@ -887,7 +751,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
}
enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_rxf *rxf = &rx->rxf;
@@ -904,8 +768,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, addr, ETH_ALEN);
+ ether_addr_copy(mac->addr, addr);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
rxf->cam_fltr_cbfn = cbfn;
@@ -917,35 +780,31 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist)
{
struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
- struct list_head *qe;
- u8 *mcaddr;
+ const u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->ucast_active_q,
+ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
- memcpy(del_mac, mac, sizeof(*del_mac));
+ ether_addr_copy(del_mac->addr, mac->addr);
+ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Allocate nodes */
@@ -954,69 +813,57 @@ bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
+ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Add the new entries */
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
}
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
return BNA_CB_UCAST_CAM_FULL;
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist)
{
struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
- struct list_head *qe;
- u8 *mcaddr;
+ const u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
-
- memcpy(del_mac, mac, sizeof(*del_mac));
+ ether_addr_copy(del_mac->addr, mac->addr);
+ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Allocate nodes */
@@ -1025,8 +872,7 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
if (mac == NULL)
goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
+ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
@@ -1034,70 +880,52 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
/* Add the new entries */
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
-bna_rx_mcast_delall(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_delall(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
- struct list_head *qe;
struct bna_mac *mac, *del_mac;
int need_hw_config = 0;
/* Purge all entries from pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
/* Schedule all entries in active_q for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
+ list_del(&mac->qe);
del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
-
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
need_hw_config = 1;
}
- if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
+ if (need_hw_config)
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
- return;
- }
-
- if (cbfn)
- (*cbfn)(rx->bna->bnad, rx);
}
void
@@ -1105,12 +933,12 @@ bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
- int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] |= bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->vlan_pending_bitmask |= (1 << group_id);
+ rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
@@ -1120,12 +948,12 @@ bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
- int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] &= ~bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->vlan_pending_bitmask |= (1 << group_id);
+ rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
@@ -1134,23 +962,21 @@ static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
- struct list_head *qe;
/* Delete MAC addresses previously added */
if (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_del_q,
+ struct bna_mac, qe);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
/* Set default unicast MAC */
if (rxf->ucast_pending_set) {
rxf->ucast_pending_set = 0;
- memcpy(rxf->ucast_active_mac.addr,
- rxf->ucast_pending_mac->addr, ETH_ALEN);
+ ether_addr_copy(rxf->ucast_active_mac.addr,
+ rxf->ucast_pending_mac->addr);
rxf->ucast_active_set = 1;
bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
BFI_ENET_H2I_MAC_UCAST_SET_REQ);
@@ -1159,9 +985,8 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
/* Add additional MAC entries */
if (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
list_add_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
@@ -1173,33 +998,30 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
- struct list_head *qe;
struct bna_mac *mac;
/* Throw away delete pending ucast entries */
while (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_del_q,
+ struct bna_mac, qe);
if (cleanup == BNA_SOFT_CLEANUP)
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
- mac);
+ list_move_tail(&mac->qe,
+ bna_ucam_mod_del_q(rxf->rx->bna));
else {
bna_bfi_ucast_req(rxf, mac,
- BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
- mac);
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ list_move_tail(&mac->qe,
+ bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
}
/* Move active ucast entries to pending_add_q */
while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->ucast_pending_add_q);
+ mac = list_first_entry(&rxf->ucast_active_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
if (cleanup == BNA_HARD_CLEANUP) {
- mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
return 1;
@@ -1654,14 +1476,11 @@ static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
- struct list_head *qe_rxp;
int is_regular = (rx->type == BNA_RX_T_REGULAR);
/* Start IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
+ list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
- }
bna_ethport_cb_rx_started(&rx->bna->ethport);
}
@@ -1804,7 +1623,6 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
- struct list_head *rxp_qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -1814,11 +1632,9 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
- for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
- i < rx->num_paths;
- i++, rxp_qe = bfa_q_next(rxp_qe)) {
- rxp = (struct bna_rxp *)rxp_qe;
-
+ for (i = 0; i < rx->num_paths; i++) {
+ rxp = rxp ? list_next_entry(rxp, qe)
+ : list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
GET_RXQS(rxp, q0, q1);
switch (rxp->type) {
case BNA_RXP_SLR:
@@ -1921,13 +1737,10 @@ static void
bna_rx_enet_stop(struct bna_rx *rx)
{
struct bna_rxp *rxp;
- struct list_head *qe_rxp;
/* Stop IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
+ list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_stop(rx->bna, &rxp->cq.ib);
- }
bna_bfi_rx_enet_stop(rx);
}
@@ -1957,12 +1770,10 @@ static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
- struct list_head *qe = NULL;
- bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
+ list_del(&rxq->qe);
rx_mod->rxq_free_count--;
- rxq = (struct bna_rxq *)qe;
- bfa_q_qe_init(&rxq->qe);
return rxq;
}
@@ -1970,7 +1781,6 @@ bna_rxq_get(struct bna_rx_mod *rx_mod)
static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
- bfa_q_qe_init(&rxq->qe);
list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
@@ -1978,13 +1788,11 @@ bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
- struct list_head *qe = NULL;
struct bna_rxp *rxp = NULL;
- bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
+ list_del(&rxp->qe);
rx_mod->rxp_free_count--;
- rxp = (struct bna_rxp *)qe;
- bfa_q_qe_init(&rxp->qe);
return rxp;
}
@@ -1992,7 +1800,6 @@ bna_rxp_get(struct bna_rx_mod *rx_mod)
static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
- bfa_q_qe_init(&rxp->qe);
list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
@@ -2000,18 +1807,16 @@ bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
- struct list_head *qe = NULL;
struct bna_rx *rx = NULL;
- if (type == BNA_RX_T_REGULAR) {
- bfa_q_deq(&rx_mod->rx_free_q, &qe);
- } else
- bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
+ BUG_ON(list_empty(&rx_mod->rx_free_q));
+ if (type == BNA_RX_T_REGULAR)
+ rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
+ else
+ rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
rx_mod->rx_free_count--;
- rx = (struct bna_rx *)qe;
- bfa_q_qe_init(&rx->qe);
- list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ list_move_tail(&rx->qe, &rx_mod->rx_active_q);
rx->type = type;
return rx;
@@ -2020,32 +1825,13 @@ bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
- struct list_head *prev_qe = NULL;
struct list_head *qe;
- bfa_q_qe_init(&rx->qe);
-
- list_for_each(qe, &rx_mod->rx_free_q) {
+ list_for_each_prev(qe, &rx_mod->rx_free_q)
if (((struct bna_rx *)qe)->rid < rx->rid)
- prev_qe = qe;
- else
break;
- }
-
- if (prev_qe == NULL) {
- /* This is the first entry */
- bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
- } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
- /* This is the last entry */
- list_add_tail(&rx->qe, &rx_mod->rx_free_q);
- } else {
- /* Somewhere in the middle */
- bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
- bfa_q_prev(&rx->qe) = prev_qe;
- bfa_q_next(prev_qe) = &rx->qe;
- bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
- }
+ list_add(&rx->qe, qe);
rx_mod->rx_free_count++;
}
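The rewritten bna_rx_put() collapses the old three-way head/middle/tail insertion into one reverse scan: walk the free list backwards, stop at the first entry whose rid is smaller, and list_add() right after it. If nothing is smaller, qe ends up pointing at the list head and list_add() degenerates into an insert-at-front, so the list stays sorted by ascending rid. A sketch of the idiom with a hypothetical node type (like the driver, it assumes qe is the struct's first member so the cast is valid):

        #include <linux/list.h>

        struct node {
                struct list_head qe;
                int rid;
        };

        /* Insert @new into @head, keeping the list sorted by ascending rid. */
        static void insert_sorted(struct list_head *head, struct node *new)
        {
                struct list_head *qe;

                list_for_each_prev(qe, head)
                        if (((struct node *)qe)->rid < new->rid)
                                break;          /* insert right after this entry */
                list_add(&new->qe, qe);         /* qe == head -> insert at front */
        }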
@@ -2199,24 +1985,20 @@ void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
if (type == BNA_RX_T_LOOPBACK)
rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type)
bna_rx_start(rx);
- }
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
@@ -2225,13 +2007,11 @@ bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type) {
bfa_wc_up(&rx_mod->rx_stop_wc);
bna_rx_stop(rx);
}
- }
bfa_wc_wait(&rx_mod->rx_stop_wc);
}
@@ -2240,15 +2020,12 @@ void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
bna_rx_fail(rx);
- }
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
@@ -2282,7 +2059,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rx_ptr = &rx_mod->rx[index];
- bfa_q_qe_init(&rx_ptr->qe);
INIT_LIST_HEAD(&rx_ptr->rxp_q);
rx_ptr->bna = NULL;
rx_ptr->rid = index;
@@ -2296,7 +2072,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RX-path queue */
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rxp_ptr = &rx_mod->rxp[index];
- bfa_q_qe_init(&rxp_ptr->qe);
list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
@@ -2304,7 +2079,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RXQ queue */
for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
rxq_ptr = &rx_mod->rxq[index];
- bfa_q_qe_init(&rxq_ptr->qe);
list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
@@ -2313,21 +2087,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &rx_mod->rx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxp_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxq_free_q)
- i++;
-
rx_mod->bna = NULL;
}
@@ -2337,7 +2096,6 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
- struct list_head *rxp_qe;
int i;
bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
@@ -2345,10 +2103,8 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
rx->hw_id = cfg_rsp->hw_id;
- for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
- i < rx->num_paths;
- i++, rxp_qe = bfa_q_next(rxp_qe)) {
- rxp = (struct bna_rxp *)rxp_qe;
+ for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+ i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
GET_RXQS(rxp, q0, q1);
/* Setup doorbells */
@@ -2396,20 +2152,19 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
- cq_depth = dq_depth + hq_depth;
+ cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
- BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_depth = roundup_pow_of_two(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
- BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_depth = roundup_pow_of_two(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
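BNA_TO_POWER_OF_2_HIGH was a driver-private macro that rounded its argument up in place; roundup_pow_of_two() from <linux/log2.h> is the stock kernel equivalent, returning the rounded value instead of mutating its argument (hence the explicit reassignments above). An illustrative helper with hypothetical values:

        #include <linux/log2.h>

        static unsigned long cq_depth_for(unsigned long dq, unsigned long hq)
        {
                /* e.g. dq = 448, hq = 64 -> 512 (already a power of two);
                 *      dq = 450, hq = 64 -> 1024 */
                return roundup_pow_of_two(dq + hq);
        }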
@@ -2620,7 +2375,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
if (intr_info->intr_type == BNA_INTR_T_MSIX)
rxp->cq.ib.intr_vector = rxp->vector;
else
- rxp->cq.ib.intr_vector = (1 << rxp->vector);
+ rxp->cq.ib.intr_vector = BIT(rxp->vector);
rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
@@ -2691,7 +2446,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* if multi-buffer is enabled sum of q0_depth
* and q1_depth need not be a power of 2
*/
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_depth = roundup_pow_of_two(cq_depth);
rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
@@ -2725,7 +2480,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- rx_mod->rid_mask |= (1 << rx->rid);
+ rx_mod->rid_mask |= BIT(rx->rid);
return rx;
}
@@ -2742,7 +2497,8 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
- bfa_q_deq(&rx->rxp_q, &rxp);
+ rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+ list_del(&rxp->qe);
GET_RXQS(rxp, q0, q1);
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
@@ -2769,15 +2525,13 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxp_put(rx_mod, rxp);
}
- list_for_each(qe, &rx_mod->rx_active_q) {
+ list_for_each(qe, &rx_mod->rx_active_q)
if (qe == &rx->qe) {
list_del(&rx->qe);
- bfa_q_qe_init(&rx->qe);
break;
}
- }
- rx_mod->rid_mask &= ~(1 << rx->rid);
+ rx_mod->rid_mask &= ~BIT(rx->rid);
rx->bna = NULL;
rx->priv = NULL;
@@ -2844,8 +2598,7 @@ bna_rx_vlan_strip_disable(struct bna_rx *rx)
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+ enum bna_rxmode bitmask)
{
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
@@ -2900,11 +2653,10 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
/* Trigger h/w if needed */
if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
- } else if (cbfn)
- (*cbfn)(rx->bna->bnad, rx);
+ }
return BNA_CB_SUCCESS;
@@ -2928,10 +2680,8 @@ void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
struct bna_rxp *rxp;
- struct list_head *qe;
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
+ list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
}
@@ -3024,16 +2774,6 @@ do { \
} \
} while (0)
-#define call_tx_prio_change_cbfn(tx) \
-do { \
- if ((tx)->prio_change_cbfn) { \
- void (*cbfn)(struct bnad *, struct bna_tx *); \
- cbfn = (tx)->prio_change_cbfn; \
- (tx)->prio_change_cbfn = NULL; \
- cbfn((tx)->bna->bnad, (tx)); \
- } \
-} while (0)
-
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
@@ -3044,7 +2784,6 @@ enum bna_tx_event {
TX_E_FAIL = 3,
TX_E_STARTED = 4,
TX_E_STOPPED = 5,
- TX_E_PRIO_CHANGE = 6,
TX_E_CLEANUP_DONE = 7,
TX_E_BW_UPDATE = 8,
};
@@ -3085,10 +2824,6 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
/* No-op */
break;
- case TX_E_PRIO_CHANGE:
- call_tx_prio_change_cbfn(tx);
- break;
-
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3109,28 +2844,23 @@ bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
break;
case TX_E_FAIL:
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_STARTED:
- if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
- BNA_TX_F_BW_UPDATED);
+ if (tx->flags & BNA_TX_F_BW_UPDATED) {
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
} else
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
- case TX_E_PRIO_CHANGE:
- tx->flags |= BNA_TX_F_PRIO_CHANGED;
- break;
-
case TX_E_BW_UPDATE:
tx->flags |= BNA_TX_F_BW_UPDATED;
break;
@@ -3144,11 +2874,9 @@ static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
int is_regular = (tx->type == BNA_TX_T_REGULAR);
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb->priority = txq->priority;
/* Start IB */
bna_ib_start(tx->bna, &txq->ib, is_regular);
@@ -3172,7 +2900,6 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
break;
@@ -3205,7 +2932,6 @@ bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bna_tx_enet_stop(tx);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3225,7 +2951,6 @@ bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3256,7 +2981,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
- call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
@@ -3264,7 +2988,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3277,7 +3000,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
- call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}
@@ -3293,7 +3015,6 @@ bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_failed);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3372,7 +3093,6 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
{
struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
struct bna_txq *txq = NULL;
- struct list_head *qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -3381,11 +3101,9 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
cfg_req->num_queues = tx->num_txq;
- for (i = 0, qe = bfa_q_first(&tx->txq_q);
- i < tx->num_txq;
- i++, qe = bfa_q_next(qe)) {
- txq = (struct bna_txq *)qe;
-
+ for (i = 0; i < tx->num_txq; i++) {
+ txq = txq ? list_next_entry(txq, qe)
+ : list_first_entry(&tx->txq_q, struct bna_txq, qe);
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
cfg_req->q_cfg[i].q.priority = txq->priority;
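The loop above replaces the old bfa_q_first()/bfa_q_next() cursor with list-entry helpers: txq starts as NULL, the first iteration fetches list_first_entry(), and each later iteration advances with list_next_entry(). The same shape with hypothetical types, assuming the caller guarantees the list holds at least n entries (as num_txq does here):

        #include <linux/list.h>

        struct q {
                struct list_head qe;
        };

        static void walk_n(struct list_head *head, int n)
        {
                struct q *cur = NULL;
                int i;

                for (i = 0; i < n; i++) {
                        cur = cur ? list_next_entry(cur, qe)
                                  : list_first_entry(head, struct q, qe);
                        /* ... use cur ... */
                }
        }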
@@ -3437,13 +3155,10 @@ static void
bna_tx_enet_stop(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
/* Stop IB */
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_stop(tx->bna, &txq->ib);
- }
bna_bfi_tx_enet_stop(tx);
}
@@ -3487,18 +3202,15 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
- struct list_head *qe = NULL;
struct bna_tx *tx = NULL;
if (list_empty(&tx_mod->tx_free_q))
return NULL;
- if (type == BNA_TX_T_REGULAR) {
- bfa_q_deq(&tx_mod->tx_free_q, &qe);
- } else {
- bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
- }
- tx = (struct bna_tx *)qe;
- bfa_q_qe_init(&tx->qe);
+ if (type == BNA_TX_T_REGULAR)
+ tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+ else
+ tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+ list_del(&tx->qe);
tx->type = type;
return tx;
@@ -3509,21 +3221,18 @@ bna_tx_free(struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
struct bna_txq *txq;
- struct list_head *prev_qe;
struct list_head *qe;
while (!list_empty(&tx->txq_q)) {
- bfa_q_deq(&tx->txq_q, &txq);
- bfa_q_qe_init(&txq->qe);
+ txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
txq->tcb = NULL;
txq->tx = NULL;
- list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ list_move_tail(&txq->qe, &tx_mod->txq_free_q);
}
list_for_each(qe, &tx_mod->tx_active_q) {
if (qe == &tx->qe) {
list_del(&tx->qe);
- bfa_q_qe_init(&tx->qe);
break;
}
}
@@ -3531,28 +3240,11 @@ bna_tx_free(struct bna_tx *tx)
tx->bna = NULL;
tx->priv = NULL;
- prev_qe = NULL;
- list_for_each(qe, &tx_mod->tx_free_q) {
+ list_for_each_prev(qe, &tx_mod->tx_free_q)
if (((struct bna_tx *)qe)->rid < tx->rid)
- prev_qe = qe;
- else {
break;
- }
- }
- if (prev_qe == NULL) {
- /* This is the first entry */
- bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
- } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
- /* This is the last entry */
- list_add_tail(&tx->qe, &tx_mod->tx_free_q);
- } else {
- /* Somewhere in the middle */
- bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
- bfa_q_prev(&tx->qe) = prev_qe;
- bfa_q_next(prev_qe) = &tx->qe;
- bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
- }
+ list_add(&tx->qe, qe);
}
static void
@@ -3585,7 +3277,6 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
struct bna_txq *txq = NULL;
- struct list_head *qe;
int i;
bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
@@ -3593,10 +3284,8 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
tx->hw_id = cfg_rsp->hw_id;
- for (i = 0, qe = bfa_q_first(&tx->txq_q);
- i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
- txq = (struct bna_txq *)qe;
-
+ for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
+ i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
/* Setup doorbells */
txq->tcb->i_dbell->doorbell_addr =
tx->bna->pcidev.pci_bar_kva
@@ -3624,12 +3313,9 @@ void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
- struct list_head *qe;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
- }
}
void
@@ -3689,7 +3375,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_mod *tx_mod = &bna->tx_mod;
struct bna_tx *tx;
struct bna_txq *txq;
- struct list_head *qe;
int page_count;
int i;
@@ -3719,9 +3404,8 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
if (list_empty(&tx_mod->txq_free_q))
goto err_return;
- bfa_q_deq(&tx_mod->txq_free_q, &txq);
- bfa_q_qe_init(&txq->qe);
- list_add_tail(&txq->qe, &tx->txq_q);
+ txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
+ list_move_tail(&txq->qe, &tx->txq_q);
txq->tx = tx;
}
@@ -3760,8 +3444,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
/* TxQ */
i = 0;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb = (struct bna_tcb *)
res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
txq->tx_packets = 0;
@@ -3779,7 +3462,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
intr_info->idl[0].vector :
intr_info->idl[i].vector;
if (intr_info->intr_type == BNA_INTR_T_INTX)
- txq->ib.intr_vector = (1 << txq->ib.intr_vector);
+ txq->ib.intr_vector = BIT(txq->ib.intr_vector);
txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
@@ -3821,7 +3504,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- tx_mod->rid_mask |= (1 << tx->rid);
+ tx_mod->rid_mask |= BIT(tx->rid);
return tx;
@@ -3834,15 +3517,12 @@ void
bna_tx_destroy(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
if (tx->tcb_destroy_cbfn)
(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
- }
- tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
+ tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
bna_tx_free(tx);
}
@@ -3920,9 +3600,7 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
tx_mod->tx[i].rid = i;
- bfa_q_qe_init(&tx_mod->tx[i].qe);
list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
- bfa_q_qe_init(&tx_mod->txq[i].qe);
list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
}
@@ -3935,17 +3613,6 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &tx_mod->tx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &tx_mod->txq_free_q)
- i++;
-
tx_mod->bna = NULL;
}
@@ -3953,24 +3620,20 @@ void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
if (type == BNA_TX_T_LOOPBACK)
tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type)
bna_tx_start(tx);
- }
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
@@ -3979,13 +3642,11 @@ bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type) {
bfa_wc_up(&tx_mod->tx_stop_wc);
bna_tx_stop(tx);
}
- }
bfa_wc_wait(&tx_mod->tx_stop_wc);
}
@@ -3994,25 +3655,19 @@ void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bna_tx_fail(tx);
- }
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
struct bna_txq *txq;
- struct list_head *qe;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
- }
}
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d0a7a566f5d6..e0e797f2ea14 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -135,7 +135,6 @@ enum bna_tx_type {
enum bna_tx_flags {
BNA_TX_F_ENET_STARTED = 1,
BNA_TX_F_ENABLED = 2,
- BNA_TX_F_PRIO_CHANGED = 4,
BNA_TX_F_BW_UPDATED = 8,
};
@@ -182,17 +181,11 @@ enum bna_rx_mod_flags {
BNA_RX_MOD_F_ENET_LOOPBACK = 2,
};
-enum bna_rxf_flags {
- BNA_RXF_F_PAUSED = 1,
-};
-
enum bna_rxf_event {
RXF_E_START = 1,
RXF_E_STOP = 2,
RXF_E_FAIL = 3,
RXF_E_CONFIG = 4,
- RXF_E_PAUSE = 5,
- RXF_E_RESUME = 6,
RXF_E_FW_RESP = 7,
};
@@ -362,9 +355,6 @@ struct bna_enet {
void (*stop_cbfn)(void *);
void *stop_cbarg;
- /* Callback for bna_enet_pause_config() */
- void (*pause_cbfn)(struct bnad *);
-
/* Callback for bna_enet_mtu_set() */
void (*mtu_cbfn)(struct bnad *);
@@ -498,9 +488,6 @@ struct bna_tx {
void (*stop_cbfn)(void *arg, struct bna_tx *tx);
void *stop_cbarg;
- /* callback for bna_tx_prio_set() */
- void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
-
struct bfa_msgq_cmd_entry msgq_cmd;
union {
struct bfi_enet_tx_cfg_req cfg_req;
@@ -676,7 +663,6 @@ struct bna_rx_config {
enum bna_rx_type rx_type;
int num_paths;
enum bna_rxp_type rxp_type;
- int paused;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
@@ -721,7 +707,6 @@ struct bna_rxp {
/* RxF structure (hardware Rx Function) */
struct bna_rxf {
bfa_fsm_t fsm;
- enum bna_rxf_flags flags;
struct bfa_msgq_cmd_entry msgq_cmd;
union {
@@ -742,10 +727,6 @@ struct bna_rxf {
void (*stop_cbfn) (struct bna_rx *rx);
struct bna_rx *stop_cbarg;
- /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
- void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
- struct bnad *oper_state_cbarg;
-
/**
* callback for:
* bna_rxf_ucast_set()
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index caae6cb2bc1a..0612b19f6313 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -57,7 +57,8 @@ static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
-static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] __aligned(2) =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/*
* Local MACROS
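The __aligned(2) added to bnad_bcast_addr is what lets the array be handed to ether_addr_copy()-based paths safely: those helpers may read the address as u16 words, and a plain char array carries no alignment guarantee of its own. A sketch with a hypothetical symbol:

        #include <linux/etherdevice.h>

        /* Without __aligned(2) this array could legally start on an odd
         * address, breaking ether_addr_copy()'s 16-bit accesses on some
         * architectures. */
        static const u8 example_bcast[ETH_ALEN] __aligned(2) = {
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff
        };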
@@ -308,7 +309,7 @@ bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
}
}
- BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
+ BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
return 0;
}
@@ -724,7 +725,6 @@ next:
cmpl->valid = 0;
BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
}
- cmpl = &cq[ccb->producer_index];
}
napi_gro_flush(&rx_ctrl->napi, false);
@@ -757,7 +757,7 @@ bnad_msix_rx(int irq, void *data)
struct bna_ccb *ccb = (struct bna_ccb *)data;
if (ccb) {
- ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+ ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
}
@@ -875,9 +875,9 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
- memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -946,8 +946,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
uint tx_id, tcb_id;
- printk(KERN_WARNING "bna: %s link up\n",
- bnad->netdev->name);
+ netdev_info(bnad->netdev, "link up\n");
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
@@ -966,10 +965,6 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
/*
* Force an immediate
* Transmit Schedule */
- printk(KERN_INFO "bna: %s %d "
- "TXQ_STARTED\n",
- bnad->netdev->name,
- txq_id);
netif_wake_subqueue(
bnad->netdev,
txq_id);
@@ -987,8 +982,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
}
} else {
if (netif_carrier_ok(bnad->netdev)) {
- printk(KERN_WARNING "bna: %s link down\n",
- bnad->netdev->name);
+ netdev_info(bnad->netdev, "link down\n");
netif_carrier_off(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
}
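The logging conversions in this file follow the standard pattern: netdev_info() prefixes the message with the interface name automatically, so the explicit bnad->netdev->name argument and the "bna:" tag drop out (dev_warn() does the same with the PCI device name). Note the patch also downgrades link transitions from warning to info severity. Shape of the change, with the message shown only as an example:

        /* before */
        printk(KERN_WARNING "bna: %s link up\n", netdev->name);

        /* after: same information, interface name added by the helper */
        netdev_info(netdev, "link up\n");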
@@ -1058,8 +1052,6 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
txq_id = tcb->id;
clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
netif_stop_subqueue(bnad->netdev, txq_id);
- printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
- bnad->netdev->name, txq_id);
}
}
@@ -1082,8 +1074,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
BUG_ON(*(tcb->hw_consumer_index) != 0);
if (netif_carrier_ok(bnad->netdev)) {
- printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
- bnad->netdev->name, txq_id);
netif_wake_subqueue(bnad->netdev, txq_id);
BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
}
@@ -1094,8 +1084,8 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* get a 0 MAC address. We try to get the MAC address
* again here.
*/
- if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
- bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
+ if (is_zero_ether_addr(bnad->perm_addr)) {
+ bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
}
}
@@ -1703,7 +1693,7 @@ bnad_ioc_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1714,7 +1704,7 @@ bnad_ioc_hb_check(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1725,7 +1715,7 @@ bnad_iocpf_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1736,7 +1726,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1862,8 +1852,7 @@ bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
struct netdev_hw_addr *mc_addr;
netdev_for_each_mc_addr(mc_addr, netdev) {
- memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
- ETH_ALEN);
+ ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
i++;
}
}
@@ -2137,7 +2126,7 @@ bnad_reinit_rx(struct bnad *bnad)
current_err = bnad_setup_rx(bnad, rx_id);
if (current_err && !err) {
err = current_err;
- pr_err("RXQ:%u setup failed\n", rx_id);
+ netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
}
}
@@ -2338,7 +2327,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad)
* Called with bnad->bna_lock held
*/
int
-bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
{
int ret;
@@ -2349,7 +2338,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
if (!bnad->rx_info[0].rx)
return 0;
- ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
if (ret != BNA_CB_SUCCESS)
return -EADDRNOTAVAIL;
@@ -2367,8 +2356,8 @@ bnad_enable_default_bcast(struct bnad *bnad)
init_completion(&bnad->bnad_completions.mcast_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
- bnad_cb_rx_mcast_add);
+ ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
+ bnad_cb_rx_mcast_add);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (ret == BNA_CB_SUCCESS)
@@ -2673,8 +2662,9 @@ bnad_enable_msix(struct bnad *bnad)
if (ret < 0) {
goto intx_mode;
} else if (ret < bnad->msix_num) {
- pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
- ret, bnad->msix_num);
+ dev_warn(&bnad->pcidev->dev,
+ "%d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
@@ -2696,7 +2686,8 @@ bnad_enable_msix(struct bnad *bnad)
return;
intx_mode:
- pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
+ dev_warn(&bnad->pcidev->dev,
+ "MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
@@ -2754,7 +2745,7 @@ bnad_open(struct net_device *netdev)
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_enet_mtu_set(&bnad->bna.enet,
BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
- bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3128,7 +3119,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
int entry;
if (netdev_uc_empty(bnad->netdev)) {
- bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
return;
}
@@ -3141,13 +3132,11 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
entry = 0;
netdev_for_each_uc_addr(ha, netdev) {
- memcpy(&mac_list[entry * ETH_ALEN],
- &ha->addr[0], ETH_ALEN);
+ ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
entry++;
}
- ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
- mac_list, NULL);
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
@@ -3158,7 +3147,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
/* ucast packets not in UCAM are routed to default function */
mode_default:
bnad->cfg_flags |= BNAD_CF_DEFAULT;
- bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
}
static void
@@ -3183,12 +3172,11 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
if (mac_list == NULL)
goto mode_allmulti;
- memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
/* copy rest of the MCAST addresses */
bnad_netdev_mc_list_get(netdev, mac_list);
- ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mac_list, NULL);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
@@ -3198,7 +3186,7 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
mode_allmulti:
bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+ bna_rx_mcast_delall(bnad->rx_info[0].rx);
}
void
@@ -3237,7 +3225,7 @@ bnad_set_rx_mode(struct net_device *netdev)
mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
BNA_RXMODE_ALLMULTI;
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3248,19 +3236,18 @@ bnad_set_rx_mode(struct net_device *netdev)
* in a non-blocking context.
*/
static int
-bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+bnad_set_mac_address(struct net_device *netdev, void *addr)
{
int err;
struct bnad *bnad = netdev_priv(netdev);
- struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ struct sockaddr *sa = (struct sockaddr *)addr;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
-
if (!err)
- memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3487,8 +3474,8 @@ bnad_init(struct bnad *bnad,
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
}
- pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
- (unsigned long long) bnad->mmio_len);
+ dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
spin_lock_irqsave(&bnad->bna_lock, flags);
if (!bnad_msix_disable)
@@ -3609,13 +3596,10 @@ bnad_pci_probe(struct pci_dev *pdev,
struct bfa_pcidev pcidev_info;
unsigned long flags;
- pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
- pdev, pcidev_id, PCI_FUNC(pdev->devfn));
-
mutex_lock(&bnad_fwimg_mutex);
if (!cna_get_firmware_buf(pdev)) {
mutex_unlock(&bnad_fwimg_mutex);
- pr_warn("Failed to load Firmware Image!\n");
+ dev_err(&pdev->dev, "failed to load firmware image!\n");
return -ENODEV;
}
mutex_unlock(&bnad_fwimg_mutex);
@@ -3693,13 +3677,13 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Set up timers */
setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
/*
* Start the chip
@@ -3708,8 +3692,7 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
err = bnad_ioceth_enable(bnad);
if (err) {
- pr_err("BNA: Initialization failed err=%d\n",
- err);
+ dev_err(&pdev->dev, "initialization failed err=%d\n", err);
goto probe_success;
}
@@ -3742,7 +3725,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Get the burnt-in mac */
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
+ bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3751,7 +3734,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Finally, register with net_device layer */
err = register_netdev(netdev);
if (err) {
- pr_err("BNA : Registering with netdev failed\n");
+ dev_err(&pdev->dev, "registering net device failed\n");
goto probe_uninit;
}
set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
@@ -3803,7 +3786,6 @@ bnad_pci_remove(struct pci_dev *pdev)
if (!netdev)
return;
- pr_info("%s bnad_pci_remove\n", netdev->name);
bnad = netdev_priv(netdev);
bna = &bnad->bna;
@@ -3864,15 +3846,14 @@ bnad_module_init(void)
{
int err;
- pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
- BNAD_VERSION);
+ pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
+ BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
if (err < 0) {
- pr_err("bna : PCI registration failed in module init "
- "(%d)\n", err);
+ pr_err("bna: PCI driver registration failed err=%d\n", err);
return err;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 7ead6c23edb6..faedbf24777e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -344,7 +344,7 @@ struct bnad {
struct bnad_completion bnad_completions;
/* Burnt in MAC address */
- mac_t perm_addr;
+ u8 perm_addr[ETH_ALEN];
struct workqueue_struct *work_q;
@@ -385,7 +385,7 @@ u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
void bnad_set_rx_mode(struct net_device *netdev);
struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
-int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr);
int bnad_enable_default_bcast(struct bnad *bnad);
void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
void bnad_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 72c89550417c..8fc246ea1fb8 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -76,8 +76,7 @@ bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bnad %s: Failed to collect fwtrc\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect fwtrc\n");
return -ENOMEM;
}
@@ -117,8 +116,7 @@ bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bna %s: Failed to collect fwsave\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect fwsave\n");
return -ENOMEM;
}
@@ -217,8 +215,7 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
drv_info->debug_buffer = NULL;
kfree(drv_info);
drv_info = NULL;
- pr_warn("bna %s: Failed to collect drvinfo\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect drvinfo\n");
return -ENOMEM;
}
@@ -271,15 +268,15 @@ bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
area = (offset >> 15) & 0x7;
if (area == 0) {
/* PCIe core register */
- if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ if (offset + (len << 2) > 0x8000) /* 8k dwords or 32KB */
return BFA_STATUS_EINVAL;
} else if (area == 0x1) {
/* CB 32 KB memory page */
- if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ if (offset + (len << 2) > 0x10000) /* 16k dwords or 64KB */
return BFA_STATUS_EINVAL;
} else {
/* CB register space 64KB */
- if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+ if (offset + (len << 2) > BFA_REG_ADDRMSK(ioc))
return BFA_STATUS_EINVAL;
}
return BFA_STATUS_OK;
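
For reference, the area decode above takes bits 17:15 of the byte offset, and len is counted in 32-bit words, hence the len << 2 conversion to bytes. A worked example under those rules (values are illustrative):

    u32 offset = 0x7F00;                 /* byte offset into BAR0 */
    u32 len    = 0x40;                   /* length in 32-bit words */
    u32 area   = (offset >> 15) & 0x7;   /* 0 -> PCIe core registers */
    u32 end    = offset + (len << 2);    /* 0x7F00 + 0x100 = 0x8000 */
    /* end is not greater than 0x8000, so the request exactly fills
     * the 32KB window and passes the check.
     */
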
@@ -321,27 +318,20 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- /* Allocate memory to store the user space buf */
- kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf)
- return -ENOMEM;
-
- if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
- kfree(kern_buf);
- return -ENOMEM;
- }
+ /* Copy the user space buf */
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
- pr_warn("bna %s: Failed to read user buffer\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
kfree(kern_buf);
kfree(bnad->regdata);
- bnad->regdata = NULL;
bnad->reglen = 0;
bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
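
memdup_user() collapses the old allocate-then-copy_from_user() pair into one call and returns an ERR_PTR() on failure, so the error path now propagates -EFAULT for a faulting copy instead of the -ENOMEM the old code returned. A minimal sketch of the idiom, assuming a user pointer buf and size nbytes as in the functions above:

    char *kern_buf;

    kern_buf = memdup_user(buf, nbytes);     /* kmalloc + copy_from_user */
    if (IS_ERR(kern_buf))
            return PTR_ERR(kern_buf);        /* -ENOMEM or -EFAULT */

    /* ... parse kern_buf, e.g. with sscanf() ... */
    kfree(kern_buf);
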
@@ -355,8 +345,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, len);
if (rc) {
- pr_warn("bna %s: Failed reg offset check\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed reg offset check\n");
kfree(bnad->regdata);
bnad->regdata = NULL;
bnad->reglen = 0;
@@ -388,20 +377,14 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- /* Allocate memory to store the user space buf */
- kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf)
- return -ENOMEM;
-
- if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
- kfree(kern_buf);
- return -ENOMEM;
- }
+ /* Copy the user space buf */
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &val);
if (rc < 2) {
- pr_warn("bna %s: Failed to read user buffer\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
@@ -412,8 +395,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, 1);
if (rc) {
- pr_warn("bna %s: Failed reg offset check\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed reg offset check\n");
return -EINVAL;
}
@@ -525,7 +507,8 @@ bnad_debugfs_init(struct bnad *bnad)
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
if (!bna_debugfs_root) {
- pr_warn("BNA: debugfs root dir creation failed\n");
+ netdev_warn(bnad->netdev,
+ "debugfs root dir creation failed\n");
return;
}
}
@@ -536,8 +519,8 @@ bnad_debugfs_init(struct bnad *bnad)
bnad->port_debugfs_root =
debugfs_create_dir(name, bna_debugfs_root);
if (!bnad->port_debugfs_root) {
- pr_warn("bna pci_dev %s: root dir creation failed\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev,
+ "debugfs root dir creation failed\n");
return;
}
@@ -552,9 +535,9 @@ bnad_debugfs_init(struct bnad *bnad)
bnad,
file->fops);
if (!bnad->bnad_dentry_files[i]) {
- pr_warn(
- "BNA pci_dev:%s: create %s entry failed\n",
- pci_name(bnad->pcidev), file->name);
+ netdev_warn(bnad->netdev,
+ "create %s entry failed\n",
+ file->name);
return;
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 12f344debd1c..2bdfc5dff4b1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -445,13 +445,13 @@ bnad_set_ringparam(struct net_device *netdev,
if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
- !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ !is_power_of_2(ringparam->rx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
- !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ !is_power_of_2(ringparam->tx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
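
is_power_of_2() comes from <linux/log2.h> and is defined so that 0 is not a power of two, so a zero ring size is rejected along with non-power-of-two values; a driver-private macro like the replaced BNA_POWER_OF_2 would typically be the classic (x & (x - 1)) == 0 test, which wrongly accepts 0. Sketch of the check in isolation:

    #include <linux/log2.h>

    /* is_power_of_2(n) behaves like (n != 0 && (n & (n - 1)) == 0) */
    if (!is_power_of_2(ringparam->rx_pending))
            return -EINVAL;
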
@@ -533,7 +533,7 @@ bnad_set_pauseparam(struct net_device *netdev,
pause_config.rx_pause = pauseparam->rx_pause;
pause_config.tx_pause = pauseparam->tx_pause;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
mutex_unlock(&bnad->conf_mutex);
@@ -1080,7 +1080,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
if (ret) {
- pr_err("BNA: Can't locate firmware %s\n", eflash->data);
+ netdev_err(netdev, "can't load firmware %s\n", eflash->data);
goto out;
}
@@ -1093,7 +1093,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
bnad->id, (u8 *)fw->data, fw->size, 0,
bnad_cb_completion, &fcomp);
if (ret != BFA_STATUS_OK) {
- pr_warn("BNA: Flash update failed with err: %d\n", ret);
+ netdev_warn(netdev, "flash update failed with err=%d\n", ret);
ret = -EIO;
spin_unlock_irq(&bnad->bna_lock);
goto out;
@@ -1103,8 +1103,9 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
wait_for_completion(&fcomp.comp);
if (fcomp.comp_status != BFA_STATUS_OK) {
ret = -EIO;
- pr_warn("BNA: Firmware image update to flash failed with: %d\n",
- fcomp.comp_status);
+ netdev_warn(netdev,
+ "firmware image update failed with err=%d\n",
+ fcomp.comp_status);
}
out:
release_firmware(fw);
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 28e7d0ffeab1..75f8f1ac9fb7 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -42,66 +42,4 @@ extern char bfa_version[];
#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
-#pragma pack(1)
-
-typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
-
-#pragma pack()
-
-#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
-#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
-#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
-
-/*
- * bfa_q_qe_init - to initialize a queue element
- */
-#define bfa_q_qe_init(_qe) { \
- bfa_q_next(_qe) = (struct list_head *) NULL; \
- bfa_q_prev(_qe) = (struct list_head *) NULL; \
-}
-
-/*
- * bfa_q_deq - dequeue an element from head of the queue
- */
-#define bfa_q_deq(_q, _qe) { \
- if (!list_empty(_q)) { \
- (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
- bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **)(_qe)) = NULL; \
- } \
-}
-
-/*
- * bfa_q_deq_tail - dequeue an element from tail of the queue
- */
-#define bfa_q_deq_tail(_q, _qe) { \
- if (!list_empty(_q)) { \
- *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
- bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
- } \
-}
-
-/*
- * bfa_add_tail_head - enqueue an element at the head of queue
- */
-#define bfa_q_enq_head(_q, _qe) { \
- if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)) \
- pr_err("Assertion failure: %s:%d: %d", \
- __FILE__, __LINE__, \
- (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
- bfa_q_next(_qe) = bfa_q_next(_q); \
- bfa_q_prev(_qe) = (struct list_head *) (_q); \
- bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
- bfa_q_next(_q) = (struct list_head *) (_qe); \
-}
-
#endif /* __CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index badea368bdc8..2e7fb97883dc 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -33,7 +33,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 n;
if (request_firmware(&fw, fw_name, &pdev->dev)) {
- pr_alert("Can't locate firmware %s\n", fw_name);
+ dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
goto error;
}
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index fc3d8e3ee807..c4d6bbe9458d 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -4,37 +4,54 @@
config NET_VENDOR_CAVIUM
tristate "Cavium ethernet drivers"
- depends on PCI && 64BIT
+ depends on PCI
+ default y
---help---
- Enable support for the Cavium ThunderX Network Interface
- Controller (NIC). The NIC provides the controller and DMA
- engines to move network traffic to/from the memory. The NIC
- works closely with TNS, BGX and SerDes to implement the
- functions replacing and virtualizing those of a typical
- standalone PCIe NIC chip.
+ Select this option if you want to enable Cavium network support.
- If you have a Cavium Thunder board, say Y.
+ If you have a Cavium SoC or network adapter, say Y.
if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
- default NET_VENDOR_CAVIUM
+ depends on 64BIT
+ default ARCH_THUNDER
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
+ The NIC provides the controller and DMA engines to
+ move network traffic to/from the memory. The NIC
+ works closely with TNS, BGX and SerDes to implement the
+ functions replacing and virtualizing those of a typical
+ standalone PCIe NIC chip.
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
- default NET_VENDOR_CAVIUM
+ depends on 64BIT
+ default ARCH_THUNDER
---help---
This driver supports Thunder's NIC virtual function.
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
- default NET_VENDOR_CAVIUM
+ depends on 64BIT
+ default ARCH_THUNDER
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
+config LIQUIDIO
+ tristate "Cavium LiquidIO support"
+ depends on 64BIT
+ select PTP_1588_CLOCK
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Cavium LiquidIO Intelligent Server Adapters
+ based on CN66XX and CN68XX chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called liquidio. This is recommended.
+
endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 7aac4780d050..d22f886ac291 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,5 +1,5 @@
#
# Makefile for the Cavium ethernet device drivers.
#
-
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
new file mode 100644
index 000000000000..2f366806835d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -0,0 +1,16 @@
+#
+# Cavium Liquidio ethernet device driver
+#
+obj-$(CONFIG_LIQUIDIO) += liquidio.o
+
+liquidio-objs := lio_main.o \
+ lio_ethtool.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_console.o \
+ octeon_nic.o
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
new file mode 100644
index 000000000000..8ad7425f89bf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -0,0 +1,796 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+int lio_cn6xxx_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");
+
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
+ octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
+
+ lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
+
+ /* make sure that the reset is written before starting timer */
+ mmiowb();
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+ dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
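
The reset check above works by side effect: SCRATCH1 is loaded with 0x1234 before the reset is pulsed, and reading back 0x1234 afterwards means the device never actually went through reset, presumably because a successful reset clears the scratch register.
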
+
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 val;
+
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+ if (val & 0x000f0000) {
+ dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
+ val & 0x000f0000);
+ }
+
+ val |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+}
+
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MPS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mps == PCIE_MPS_DEFAULT) {
+ mps = ((val & (0x7 << 5)) >> 5);
+ } else {
+ val &= ~(0x7 << 5); /* Turn off any MPS bits */
+ val |= (mps << 5); /* Set MPS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= (mps << 4);
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MRRS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mrrs == PCIE_MRRS_DEFAULT) {
+ mrrs = ((val & (0x7 << 12)) >> 12);
+ } else {
+ val &= ~(0x7 << 12); /* Turn off any MRRS bits */
+ val |= (mrrs << 12); /* Set MRRS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
+ r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
+ r64 |= mrrs;
+ octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);
+
+ /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= mrrs;
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
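
Both helpers above manipulate standard PCIe Device Control fields: per the PCIe spec, bits 7:5 of that register encode Max Payload Size and bits 14:12 encode Max Read Request Size, each as a power-of-two multiple of 128 bytes. So PCIE_MRRS_512B == 2 corresponds to 128B << 2 = 512B, which lio_cn6xxx_setup_device_regs() later programs as the default MRRS.
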
+
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
+ * for SLI.
+ */
+ return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
+}
+
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
+ u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);
+
+ /* core clock per us / oq ticks would be fractional. To avoid that,
+ * we use the method below.
+ */
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
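
A worked pass through the arithmetic above, with illustrative numbers: a 600 MHz SLI clock yields 600 clocks per microsecond; scaling by 1000 gives 600000 clocks per millisecond, and dividing by 1024 gives roughly 585 oq ticks per millisecond. For time_intr_in_us = 100, the function returns 585 * 100 / 1000 = 58 ticks.
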
+
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
+{
+ /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
+ CN6XXX_INPUT_CTL_MASK);
+
+ /* Instruction Read Size - Max 4 instructions per PCIE Read */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
+ 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Select PCIE Port for all Input rings. */
+ octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
+ (oct->pcie_port * 0x5555555555555555ULL));
+}
+
+static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ u64 pktctl;
+
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 66XX SPECIFIC */
+ if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
+ /* Disable RING_EN if only up to 4 rings are used. */
+ pktctl &= ~(1 << 4);
+ else
+ pktctl |= (1 << 4);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 time_threshold;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ /* Select PCI-E Port for all Output queues */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
+ (oct->pcie_port * 0x5555555555555555ULL));
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
+ } else {
+ /* Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
+ }
+
+ /* Select Info Ptr for length & data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
+
+ /* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
+
+ /* Select ES, RO, NS setting from register for Output Queue Packet
+ * Address
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
+ * Queue ScatterList
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);
+
+ /* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
+#ifdef __BIG_ENDIAN_BITFIELD
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
+ 0x5555555555555555ULL);
+#else
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
+#endif
+
+ /* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
+ 0x5555555555555555ULL);
+
+ /* Set up interrupt packet and time threshold */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
+ time_threshold =
+ lio_cn6xxx_get_oq_ticks(oct, (u32)
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf));
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+}
+
+static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn66xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+ /* Default error timeout value should be 0x200000 to avoid a host
+ * hang when an invalid register is read
+ */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+ return 0;
+}
+
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this
+ * queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr
+ + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter
+ * (used in flush_iq calculation)
+ */
+ iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
+}
+
+static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ lio_cn6xxx_setup_iq_regs(oct, iq_no);
+
+ /* Backpressure for this queue - WMARK set to all F's. This effectively
+ * disables the backpressure mechanism.
+ */
+ octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
+ (0xFFFFFFFFULL << 32));
+}
+
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 intr;
+ struct octeon_droq *droq = oct->droq[oq_no];
+
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ /* Enable this output queue to generate Packet Timer Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);
+
+ /* Enable this output queue to generate Packet Count Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
+}
+
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+{
+ u32 mask;
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
+ mask |= oct->io_qmask.iq64B;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask |= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask |= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+}
+
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
+{
+ u32 mask, i, loop = HZ;
+ u32 d32;
+
+ /* Reset the Enable bits for Input Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask ^= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ /* Wait until hardware indicates that the queues are out of reset. */
+ mask = oct->io_qmask.iq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Input queue. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
+ }
+
+ /* Reset the Enable bits for Output Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask ^= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ /* Wait until hardware indicates that the queues are out of reset. */
+ loop = HZ;
+ mask = oct->io_qmask.oq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Output queue. */
+ /* for (i = 0; i < oct->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
+ }
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
+}
+
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ oct->fn_list.setup_iq_regs(oct, i);
+ }
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ oct->fn_list.setup_oq_regs(oct, i);
+ }
+
+ oct->fn_list.setup_device_regs(oct);
+
+ oct->fn_list.enable_interrupt(oct->chip);
+
+ oct->fn_list.enable_io_queues(oct);
+
+ /* for (i = 0; i < oct->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
+ }
+}
+
+void
+lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
+ u64 core_addr,
+ u32 idx,
+ int valid)
+{
+ u64 bar1;
+
+ if (valid == 0) {
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ return;
+ }
+
+ /* Bits 17:4 of the PCI_BAR1_INDEXx store bits 35:22 of
+ * the Core Addr
+ */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
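
The index-register encoding above can be checked by hand (the address is illustrative): for core_addr = 0x120400000, core_addr >> 22 = 0x481, which is bits 35:22 of the address; shifting left by 4 places it in register bits 17:4, and OR-ing in PCI_BAR1_MASK sets the enable bits, so the value written is (0x481 << 4) | PCI_BAR1_MASK.
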
+
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
+ u32 idx,
+ u32 mask)
+{
+ lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+ struct octeon_instr_queue *iq)
+{
+ u32 new_idx = readl(iq->inst_cnt_reg);
+
+ /* The new instr cnt reg is a 32-bit counter that can roll over. We
+ * saved the counter's initial value at init time in
+ * reset_instr_cnt
+ */
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index.
+ */
+ new_idx %= iq->max_count;
+
+ return new_idx;
+}
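
Rollover case, worked with illustrative values: if reset_instr_cnt = 0xFFFFFFF0 and the register now reads new_idx = 0x10, the counter wrapped, so the else branch computes 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1 = 0x20 instructions issued since init; the final % iq->max_count then folds that count into a ring index.
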
+
+void lio_cn6xxx_enable_interrupt(void *chip)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
+
+ /* Enable Interrupt */
+ writeq(mask, cn6xxx->intr_enb_reg64);
+}
+
+void lio_cn6xxx_disable_interrupt(void *chip)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ /* Disable Interrupts */
+ writeq(0, cn6xxx->intr_enb_reg64);
+
+ /* make sure interrupts are really disabled */
+ mmiowb();
+}
+
+static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
+{
+ /* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
+ * to determine the PCIE port #
+ */
+ oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
+}
+
+void
+lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
+{
+ dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
+ CVM_CAST64(intr64));
+}
+
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+{
+ struct octeon_droq *droq;
+ u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ u32 droq_cnt_enb, droq_cnt_mask;
+
+ droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ droq_mask = droq_cnt_mask & droq_cnt_enb;
+
+ droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ droq_mask |= (droq_time_mask & droq_int_enb);
+
+ droq_mask &= oct->io_qmask.oq;
+
+ oct->droq_intr = 0;
+
+ /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+ if (!(droq_mask & (1 << oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+ pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+ if (pkt_count) {
+ oct->droq_intr |= (1ULL << oq_no);
+ if (droq->ops.poll_mode) {
+ u32 value;
+ u32 reg;
+
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ /* disable interrupts for this droq */
+ spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
+ reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+ reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+
+ /* Ensure that the enable register is written. */
+ mmiowb();
+
+ spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
+ }
+ }
+ }
+
+ droq_time_mask &= oct->io_qmask.oq;
+ droq_cnt_mask &= oct->io_qmask.oq;
+
+ /* Reset the PKT_CNT/TIME_INT registers. */
+ if (droq_time_mask)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);
+
+ if (droq_cnt_mask) /* reset PKT_CNT register:66xx */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);
+
+ return 0;
+}
+
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u64 intr64;
+
+ intr64 = readq(cn6xxx->intr_sum_reg64);
+
+ /* If our device has interrupted, then proceed.
+ * Also check for all F's, which is what a failed PCI read returns
+ * when the interrupt was triggered on an error.
+ */
+ if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
+ return IRQ_NONE;
+
+ oct->int_status = 0;
+
+ if (intr64 & CN6XXX_INTR_ERR)
+ lio_cn6xxx_process_pcie_error_intr(oct, intr64);
+
+ if (intr64 & CN6XXX_INTR_PKT_DATA) {
+ lio_cn6xxx_process_droq_intr_regs(oct);
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & CN6XXX_INTR_DMA0_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+
+ if (intr64 & CN6XXX_INTR_DMA1_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn6xxx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
+ void *chip,
+ struct octeon_reg_list *reg_list)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ reg_list->pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
+ reg_list->pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
+ reg_list->pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);
+
+ reg_list->pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
+ reg_list->pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
+ reg_list->pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);
+
+ reg_list->pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
+ reg_list->pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
+ reg_list->pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);
+
+ reg_list->pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
+ reg_list->pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
+ reg_list->pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);
+
+ lio_cn6xxx_get_pcie_qlmport(oct);
+
+ cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
+ cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
+ cn6xxx->intr_enb_reg64 =
+ bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
+}
+
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
+ oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ cn6xxx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, LIO_210SV);
+ if (!cn6xxx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *conf6xxx)
+{
+ /* int total_instrs = 0; */
+
+ if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+ if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
+ !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
new file mode 100644
index 000000000000..f77918779355
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -0,0 +1,107 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_device.h
+ * \brief Host Driver: Routines that perform CN66XX specific operations.
+ */
+
+#ifndef __CN66XX_DEVICE_H__
+#define __CN66XX_DEVICE_H__
+
+/* Register address and configuration for CN6XXX devices.
+ * If device specific changes need to be made then add a struct to include
+ * device specific fields as shown in the commented section
+ */
+struct octeon_cn6xxx {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+
+ /* Example additional fields - not used currently
+ * struct {
+ * }cn6xyz;
+ */
+
+ /* For the purpose of atomic access to interrupt enable reg */
+ spinlock_t lock_for_droq_int_enb_reg;
+
+};
+
+enum octeon_pcie_mps {
+ PCIE_MPS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MPS_128B = 0,
+ PCIE_MPS_256B = 1
+};
+
+enum octeon_pcie_mrrs {
+ PCIE_MRRS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MRRS_128B = 0,
+ PCIE_MRRS_256B = 1,
+ PCIE_MRRS_512B = 2,
+ PCIE_MRRS_1024B = 3,
+ PCIE_MRRS_2048B = 4,
+ PCIE_MRRS_4096B = 5
+};
+
+/* Common functions for 66xx and 68xx */
+int lio_cn6xxx_soft_reset(struct octeon_device *oct);
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps);
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs);
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
+void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid);
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+ struct octeon_instr_queue *iq);
+void lio_cn6xxx_enable_interrupt(void *chip);
+void lio_cn6xxx_disable_interrupt(void *chip);
+void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
+ struct octeon_reg_list *reg_list);
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *);
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
new file mode 100644
index 000000000000..5e3aff242ad3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -0,0 +1,535 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN66XX devices.
+ */
+
+#ifndef __CN66XX_REGS_H__
+#define __CN66XX_REGS_H__
+
+#define CN6XXX_XPANSION_BAR 0x30
+
+#define CN6XXX_MSI_CAP 0x50
+#define CN6XXX_MSI_ADDR_LO 0x54
+#define CN6XXX_MSI_ADDR_HI 0x58
+#define CN6XXX_MSI_DATA 0x5C
+
+#define CN6XXX_PCIE_CAP 0x70
+#define CN6XXX_PCIE_DEVCAP 0x74
+#define CN6XXX_PCIE_DEVCTL 0x78
+#define CN6XXX_PCIE_LINKCAP 0x7C
+#define CN6XXX_PCIE_LINKCTL 0x80
+#define CN6XXX_PCIE_SLOTCAP 0x84
+#define CN6XXX_PCIE_SLOTCTL 0x88
+
+#define CN6XXX_PCIE_ENH_CAP 0x100
+#define CN6XXX_PCIE_UNCORR_ERR_STATUS 0x104
+#define CN6XXX_PCIE_UNCORR_ERR_MASK 0x108
+#define CN6XXX_PCIE_UNCORR_ERR 0x10C
+#define CN6XXX_PCIE_CORR_ERR_STATUS 0x110
+#define CN6XXX_PCIE_CORR_ERR_MASK 0x114
+#define CN6XXX_PCIE_ADV_ERR_CAP 0x118
+
+#define CN6XXX_PCIE_ACK_REPLAY_TIMER 0x700
+#define CN6XXX_PCIE_OTHER_MSG 0x704
+#define CN6XXX_PCIE_PORT_FORCE_LINK 0x708
+#define CN6XXX_PCIE_ACK_FREQ 0x70C
+#define CN6XXX_PCIE_PORT_LINK_CTL 0x710
+#define CN6XXX_PCIE_LANE_SKEW 0x714
+#define CN6XXX_PCIE_SYM_NUM 0x718
+#define CN6XXX_PCIE_FLTMSK 0x720
+
+/* ############## BAR0 Registers ################ */
+
+#define CN6XXX_SLI_CTL_PORT0 0x0050
+#define CN6XXX_SLI_CTL_PORT1 0x0060
+
+#define CN6XXX_SLI_WINDOW_CTL 0x02E0
+#define CN6XXX_SLI_DBG_DATA 0x0310
+#define CN6XXX_SLI_SCRATCH1 0x03C0
+#define CN6XXX_SLI_SCRATCH2 0x03D0
+#define CN6XXX_SLI_CTL_STATUS 0x0570
+
+#define CN6XXX_WIN_WR_ADDR_LO 0x0000
+#define CN6XXX_WIN_WR_ADDR_HI 0x0004
+#define CN6XXX_WIN_WR_ADDR64 CN6XXX_WIN_WR_ADDR_LO
+
+#define CN6XXX_WIN_RD_ADDR_LO 0x0010
+#define CN6XXX_WIN_RD_ADDR_HI 0x0014
+#define CN6XXX_WIN_RD_ADDR64 CN6XXX_WIN_RD_ADDR_LO
+
+#define CN6XXX_WIN_WR_DATA_LO 0x0020
+#define CN6XXX_WIN_WR_DATA_HI 0x0024
+#define CN6XXX_WIN_WR_DATA64 CN6XXX_WIN_WR_DATA_LO
+
+#define CN6XXX_WIN_RD_DATA_LO 0x0040
+#define CN6XXX_WIN_RD_DATA_HI 0x0044
+#define CN6XXX_WIN_RD_DATA64 CN6XXX_WIN_RD_DATA_LO
+
+#define CN6XXX_WIN_WR_MASK_LO 0x0030
+#define CN6XXX_WIN_WR_MASK_HI 0x0034
+#define CN6XXX_WIN_WR_MASK_REG CN6XXX_WIN_WR_MASK_LO
+
+/* 1 register (32-bit) to enable Input queues */
+#define CN6XXX_SLI_PKT_INSTR_ENB 0x1000
+
+/* 1 register (32-bit) to enable Output queues */
+#define CN6XXX_SLI_PKT_OUT_ENB 0x1010
+
+/* 1 register (32-bit) to determine whether Output queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_OQ 0x11F0
+
+/* 1 register (32-bit) to determine whether Input queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_IQ 0x11F4
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 1 register (32-bit) - instr. size of each input queue. */
+#define CN6XXX_SLI_PKT_INSTR_SIZE 0x1020
+
+/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN6XXX_SLI_IQ_INSTR_COUNT_START 0x2000
+
+/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN6XXX_SLI_IQ_BASE_ADDR_START64 0x2800
+
+/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN6XXX_SLI_IQ_DOORBELL_START 0x2C00
+
+/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN6XXX_SLI_IQ_SIZE_START 0x3000
+
+/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 0x3400
+
+/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */
+#define CN66XX_SLI_INPUT_BP_START64 0x3800
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_IQ_OFFSET 0x10
+
+/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT_INPUT_CONTROL.
+ */
+#define CN6XXX_SLI_PKT_INPUT_CONTROL 0x1170
+
+/* 1 register (64-bit) - Number of instructions to read at one time
+ * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE.
+ */
+#define CN6XXX_SLI_PKT_INSTR_RD_SIZE 0x11A0
+
+/* 1 register (64-bit) - Assign Input ring to MAC port
+ * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT.
+ */
+#define CN6XXX_SLI_IN_PCIE_PORT 0x11B0
+
+/*------- Request Queue Macros ---------*/
+#define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_SIZE(iq) \
+ (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
+ (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_DOORBELL(iq) \
+ (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
+ (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN66XX_SLI_IQ_BP64(iq) \
+ (CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
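
Each per-queue CSR in the blocks above sits at a 16-byte stride (CN6XXX_IQ_OFFSET = 0x10), so for example:

    /* doorbell CSR for input queue 2 */
    CN6XXX_SLI_IQ_DOORBELL(2) == 0x2C00 + 2 * 0x10 == 0x2C20
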
+/*------------------ Masks ----------------*/
+#define CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB BIT(22)
+#define CN6XXX_INPUT_CTL_DATA_NS BIT(8)
+#define CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN6XXX_INPUT_CTL_DATA_RO BIT(5)
+#define CN6XXX_INPUT_CTL_USE_CSR BIT(4)
+#define CN6XXX_INPUT_CTL_GATHER_NS BIT(3)
+#define CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP BIT(2)
+#define CN6XXX_INPUT_CTL_GATHER_RO BIT(1)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR \
+ | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP)
+#else
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR)
+#endif
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN6XXX_SLI_OQ0_BUFF_INFO_SIZE 0x0C00
+
+/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN6XXX_SLI_OQ_BASE_ADDR_START64 0x1400
+
+/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN6XXX_SLI_OQ_PKT_CREDITS_START 0x1800
+
+/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN6XXX_SLI_OQ_SIZE_START 0x1C00
+
+/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN6XXX_SLI_OQ_PKT_SENT_START 0x2400
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_OQ_OFFSET 0x10
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_ROR
+ */
+#define CN6XXX_SLI_PKT_SLIST_ROR 0x1030
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_NS
+ */
+#define CN6XXX_SLI_PKT_SLIST_NS 0x1040
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue descriptors
+ * - SLI_PKT_SLIST_ES
+ */
+#define CN6XXX_SLI_PKT_SLIST_ES64 0x1050
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - InfoPtr mode for Output Queues.
+ * - SLI_PKT_IPTR
+ */
+#define CN6XXX_SLI_PKT_IPTR 0x1070
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - DPTR format selector for Output queues.
+ * - SLI_PKT_DPADDR
+ */
+#define CN6XXX_SLI_PKT_DPADDR 0x1080
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_ROR
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ROR 0x1090
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_NS
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_NS 0x10A0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue data
+ * - SLI_PKT_DATA_OUT_ES
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ES64 0x10B0
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets.
+ * - SLI_PKT_OUT_BMODE
+ */
+#define CN6XXX_SLI_PKT_OUT_BMODE 0x10D0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Assign PCIE port for Output queues
+ * - SLI_PKT_PCIE_PORT.
+ */
+#define CN6XXX_SLI_PKT_PCIE_PORT64 0x10E0
+
+/* 1 (64-bit) register for Output Queue Packet Count Interrupt Threshold
+ * & Time Threshold. The same setting applies to all 32 queues.
+ * The register is defined as a single 64-bit register, but we use
+ * 32-bit offsets to address the two halves separately.
+ */
+#define CN6XXX_SLI_OQ_INT_LEVEL_PKTS 0x1120
+#define CN6XXX_SLI_OQ_INT_LEVEL_TIME 0x1124
+
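+/* For reference: oct_cfg_rx_intrcnt() and oct_cfg_rx_intrtime() in
+ * lio_ethtool.c program these two halves with separate 32-bit
+ * octeon_write_csr() calls:
+ *
+ *	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, pkt_thresh);
+ *	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_thresh);
+ *
+ * ('pkt_thresh' and 'time_thresh' stand in for the computed values.)
+ */
+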
+/* 1 (64-bit register) for Output Queue backpressure across all rings. */
+#define CN6XXX_SLI_OQ_WMARK 0x1180
+
+/* 1 register to control output queue global backpressure & ring enable. */
+#define CN6XXX_SLI_PKT_CTL 0x1220
+
+/*------- Output Queue Macros ---------*/
+#define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_SIZE(oq) \
+ (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_SENT(oq) \
+ (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET))
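+
+/* Illustrative sketch: returning 'credits' receive buffers to output
+ * queue 'oq' is a single CSR write through the macro above:
+ *
+ *	octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(oq), credits);
+ *
+ * ('oq' and 'credits' are placeholders, not names used in this file.)
+ */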
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN6XXX_DMA_CNT_START 0x0400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values
+ * SLI_DMA_0_TIM
+ */
+#define CN6XXX_DMA_TIM_START 0x0420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN6XXX_DMA_INT_LEVEL_START 0x03E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN6XXX_DMA_CNT(dq) \
+ (CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_PKT_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIME_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIM(dq) \
+ (CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+/*######################## INTERRUPTS #########################*/
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN6XXX_SLI_INT_SUM64 0x0330
+
+/* 1 register (64-bit) for Interrupt Enable */
+#define CN6XXX_SLI_INT_ENB64_PORT0 0x0340
+#define CN6XXX_SLI_INT_ENB64_PORT1 0x0350
+
+/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */
+#define CN6XXX_SLI_PKT_CNT_INT_ENB 0x1150
+
+/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */
+#define CN6XXX_SLI_PKT_TIME_INT_ENB 0x1160
+
+/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */
+#define CN6XXX_SLI_PKT_CNT_INT 0x1130
+
+/* 1 register (32-bit) to indicate which Output Queue reached time threshold */
+#define CN6XXX_SLI_PKT_TIME_INT 0x1140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN6XXX_INTR_RML_TIMEOUT_ERR BIT(1)
+#define CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR BIT(2)
+#define CN6XXX_INTR_IO2BIG_ERR BIT(3)
+#define CN6XXX_INTR_PKT_COUNT BIT(4)
+#define CN6XXX_INTR_PKT_TIME BIT(5)
+#define CN6XXX_INTR_M0UPB0_ERR BIT(8)
+#define CN6XXX_INTR_M0UPWI_ERR BIT(9)
+#define CN6XXX_INTR_M0UNB0_ERR BIT(10)
+#define CN6XXX_INTR_M0UNWI_ERR BIT(11)
+#define CN6XXX_INTR_M1UPB0_ERR BIT(12)
+#define CN6XXX_INTR_M1UPWI_ERR BIT(13)
+#define CN6XXX_INTR_M1UNB0_ERR BIT(14)
+#define CN6XXX_INTR_M1UNWI_ERR BIT(15)
+#define CN6XXX_INTR_MIO_INT0 BIT(16)
+#define CN6XXX_INTR_MIO_INT1 BIT(17)
+#define CN6XXX_INTR_MAC_INT0 BIT(18)
+#define CN6XXX_INTR_MAC_INT1 BIT(19)
+
+#define CN6XXX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN6XXX_INTR_DMA1_FORCE BIT_ULL(33)
+#define CN6XXX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN6XXX_INTR_DMA1_COUNT BIT_ULL(35)
+#define CN6XXX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN6XXX_INTR_DMA1_TIME BIT_ULL(37)
+#define CN6XXX_INTR_INSTR_DB_OF_ERR BIT_ULL(48)
+#define CN6XXX_INTR_SLIST_DB_OF_ERR BIT_ULL(49)
+#define CN6XXX_INTR_POUT_ERR BIT_ULL(50)
+#define CN6XXX_INTR_PIN_BP_ERR BIT_ULL(51)
+#define CN6XXX_INTR_PGL_ERR BIT_ULL(52)
+#define CN6XXX_INTR_PDI_ERR BIT_ULL(53)
+#define CN6XXX_INTR_POP_ERR BIT_ULL(54)
+#define CN6XXX_INTR_PINS_ERR BIT_ULL(55)
+#define CN6XXX_INTR_SPRT0_ERR BIT_ULL(56)
+#define CN6XXX_INTR_SPRT1_ERR BIT_ULL(57)
+#define CN6XXX_INTR_ILL_PAD_ERR BIT_ULL(60)
+
+#define CN6XXX_INTR_DMA0_DATA (CN6XXX_INTR_DMA0_TIME)
+
+#define CN6XXX_INTR_DMA1_DATA (CN6XXX_INTR_DMA1_TIME)
+
+#define CN6XXX_INTR_DMA_DATA \
+ (CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA)
+
+#define CN6XXX_INTR_PKT_DATA (CN6XXX_INTR_PKT_TIME | \
+ CN6XXX_INTR_PKT_COUNT)
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN6XXX_INTR_PCIE_DATA \
+ (CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA)
+
+#define CN6XXX_INTR_MIO \
+ (CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1)
+
+#define CN6XXX_INTR_MAC \
+ (CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1)
+
+/* Sum of interrupts for error events */
+#define CN6XXX_INTR_ERR \
+ (CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR \
+ | CN6XXX_INTR_IO2BIG_ERR \
+ | CN6XXX_INTR_M0UPB0_ERR \
+ | CN6XXX_INTR_M0UPWI_ERR \
+ | CN6XXX_INTR_M0UNB0_ERR \
+ | CN6XXX_INTR_M0UNWI_ERR \
+ | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UPWI_ERR \
+	| CN6XXX_INTR_M1UNB0_ERR \
+ | CN6XXX_INTR_M1UNWI_ERR \
+ | CN6XXX_INTR_INSTR_DB_OF_ERR \
+ | CN6XXX_INTR_SLIST_DB_OF_ERR \
+ | CN6XXX_INTR_POUT_ERR \
+ | CN6XXX_INTR_PIN_BP_ERR \
+ | CN6XXX_INTR_PGL_ERR \
+ | CN6XXX_INTR_PDI_ERR \
+ | CN6XXX_INTR_POP_ERR \
+ | CN6XXX_INTR_PINS_ERR \
+ | CN6XXX_INTR_SPRT0_ERR \
+ | CN6XXX_INTR_SPRT1_ERR \
+ | CN6XXX_INTR_ILL_PAD_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN6XXX_INTR_MASK \
+ (CN6XXX_INTR_PCIE_DATA \
+ | CN6XXX_INTR_DMA0_FORCE \
+ | CN6XXX_INTR_DMA1_FORCE \
+ | CN6XXX_INTR_MIO \
+ | CN6XXX_INTR_MAC \
+ | CN6XXX_INTR_ERR)
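+
+/* Illustrative sketch: an interrupt handler would typically read the
+ * 64-bit summary register and test it against the masks above:
+ *
+ *	u64 isr = octeon_read_csr64(oct, CN6XXX_SLI_INT_SUM64);
+ *	if (isr & CN6XXX_INTR_ERR)
+ *		handle_error_interrupts();
+ *	if (isr & CN6XXX_INTR_PKT_DATA)
+ *		schedule_oq_processing();
+ *
+ * (handle_error_interrupts/schedule_oq_processing are hypothetical.)
+ */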
+
+#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80
+#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90
+#define CN6XXX_SLI_S2M_PORTX_CTL(port) \
+	(CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10))
+
+#define CN6XXX_SLI_INT_ENB64(port) \
+	(CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10))
+
+#define CN6XXX_SLI_MAC_NUMBER 0x3E00
+
+/* CN6XXX BAR1 Index registers. */
+#define CN6XXX_PEM_BAR1_INDEX000 0x00011800C00000A8ULL
+#define CN6XXX_PEM_OFFSET 0x0000000001000000ULL
+
+#define CN6XXX_BAR1_INDEX_START CN6XXX_PEM_BAR1_INDEX000
+#define CN6XXX_PCI_BAR1_OFFSET 0x8
+
+#define CN6XXX_BAR1_REG(idx, port) \
+	(CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \
+ (CN6XXX_PCI_BAR1_OFFSET * (idx)))
+
+/*############################ DPI #########################*/
+
+#define CN6XXX_DPI_CTL 0x0001df0000000040ULL
+
+#define CN6XXX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+#define CN6XXX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+#define CN6XXX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+#define CN6XXX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+
+#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
+	(CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
+
+#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+
+#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
+	(CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
+
+#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL
+#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL
+#define CN6XXX_DPI_SLI_PRTX_CFG(port) \
+	(CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10))
+
+#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57)
+#define CN6XXX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN6XXX_DPI_DMA_O_ES BIT_ULL(15)
+#define CN6XXX_DPI_DMA_O_MODE BIT_ULL(14)
+
+#define CN6XXX_DPI_DMA_CTL_MASK \
+ (CN6XXX_DPI_DMA_COMMIT_MODE | \
+ CN6XXX_DPI_DMA_PKT_HP | \
+ CN6XXX_DPI_DMA_PKT_EN | \
+ CN6XXX_DPI_DMA_O_ES | \
+ CN6XXX_DPI_DMA_O_MODE)
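+
+/* For reference: lio_cn68xx_set_dpi_regs() in cn68xx_device.c programs
+ * this mask with a single indirect register write:
+ *
+ *	lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
+ */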
+
+/*############################ CIU #########################*/
+
+#define CN6XXX_CIU_SOFT_BIST 0x0001070000000738ULL
+#define CN6XXX_CIU_SOFT_RST 0x0001070000000740ULL
+
+/*############################ MIO #########################*/
+#define CN6XXX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL
+#define CN6XXX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL
+#define CN6XXX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL
+#define CN6XXX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL
+#define CN6XXX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL
+#define CN6XXX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define CN6XXX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL
+#define CN6XXX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL
+#define CN6XXX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL
+#define CN6XXX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL
+
+#define CN6XXX_MIO_QLM4_CFG 0x00011800000015B0ULL
+#define CN6XXX_MIO_RST_BOOT 0x0001180000001600ULL
+
+#define CN6XXX_MIO_QLM_CFG_MASK 0x7
+
+/*############################ LMC #########################*/
+
+#define CN6XXX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
new file mode 100644
index 000000000000..8e830d0c0754
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
+{
+ u32 i;
+ u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };
+
+ lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+		/* Prevent service of the instruction queue for all DMA
+		 * engines. Engine 5 will remain 0; engines 0-4 will be set
+		 * up by the core.
+		 */
+ lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
+ lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
+ dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ /* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
+ * separately.
+ */
+
+ lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_CTL));
+}
+
+static int lio_cn68xx_soft_reset(struct octeon_device *oct)
+{
+ lio_cn6xxx_soft_reset(oct);
+ lio_cn68xx_set_dpi_regs(oct);
+
+ return 0;
+}
+
+static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u64 pktctl, tx_pipe, max_oqs;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 68XX specific */
+ max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf));
+ tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
+ tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
+ tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
+ octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn68xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+	/* The default error timeout value should be 0x200000 to avoid a
+	 * host hang when an invalid register is read.
+	 */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+
+ return 0;
+}
+
+static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
+{
+ u32 val = 0;
+
+ /* Set M_VEND1_DRP and M_VEND0_DRP bits */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
+ val |= 0x3;
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
+}
+
+int lio_is_210nv(struct octeon_device *oct)
+{
+ u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
+
+ return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
+}
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u16 card_type = LIO_410NV;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
+ oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ /* Determine variant of card */
+ if (lio_is_210nv(oct))
+ card_type = LIO_210NV;
+
+ cn68xx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, card_type);
+ if (!cn68xx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
+ __func__,
+ (card_type == LIO_410NV) ? LIO_410NV_NAME :
+ LIO_210NV_NAME);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ lio_cn68xx_vendor_message_fix(oct);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
new file mode 100644
index 000000000000..d4e1c9fb0bf2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -0,0 +1,33 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_device.h
+ * \brief Host Driver: Routines that perform CN68XX specific operations.
+ */
+
+#ifndef __CN68XX_DEVICE_H__
+#define __CN68XX_DEVICE_H__
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
+int lio_is_210nv(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
new file mode 100644
index 000000000000..38cddbd107b6
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -0,0 +1,51 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN68XX devices. The register map for CN66XX is the same
+ * for most registers. This file has the other registers that are
+ * 68XX-specific.
+ */
+
+#ifndef __CN68XX_REGS_H__
+#define __CN68XX_REGS_H__
+#include "cn66xx_regs.h"
+
+/*###################### REQUEST QUEUE #########################*/
+
+#define CN68XX_SLI_IQ_PORT0_PKIND 0x0800
+
+#define CN68XX_SLI_IQ_PORT_PKIND(iq) \
+ (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* Starting pipe number and number of pipes used by the SLI packet output. */
+#define CN68XX_SLI_TX_PIPE 0x1230
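+
+/* NUMP (the number of pipes) occupies bits <23:16> of this register;
+ * lio_cn68xx_setup_pkt_ctl_regs() clears that byte and ORs in the
+ * maximum output-queue count:
+ *
+ *	tx_pipe &= 0xffffffffff00ffffULL;
+ *	tx_pipe |= max_oqs << 16;
+ */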
+
+/*######################## INTERRUPTS #########################*/
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN68XX_INTR_PIPE_ERR BIT_ULL(61)
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
new file mode 100644
index 000000000000..160f8077692c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -0,0 +1,1216 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+struct oct_mdio_cmd_context {
+ int octeon_id;
+ wait_queue_head_t wc;
+ int cond;
+};
+
+struct oct_mdio_cmd_resp {
+ u64 rh;
+ struct oct_mdio_cmd resp;
+ u64 status;
+};
+
+#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
+
+/* Octeon's interface mode of operation */
+enum {
+ INTERFACE_MODE_DISABLED,
+ INTERFACE_MODE_RGMII,
+ INTERFACE_MODE_GMII,
+ INTERFACE_MODE_SPI,
+ INTERFACE_MODE_PCIE,
+ INTERFACE_MODE_XAUI,
+ INTERFACE_MODE_SGMII,
+ INTERFACE_MODE_PICMG,
+ INTERFACE_MODE_NPI,
+ INTERFACE_MODE_LOOP,
+ INTERFACE_MODE_SRIO,
+ INTERFACE_MODE_ILK,
+ INTERFACE_MODE_RXAUI,
+ INTERFACE_MODE_QSGMII,
+ INTERFACE_MODE_AGL,
+};
+
+#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGSVER 1
+
+static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
+ "Instr posted",
+ "Instr processed",
+ "Instr dropped",
+ "Bytes Sent",
+ "Sgentry_sent",
+ "Inst cntreg",
+ "Tx done",
+ "Tx Iq busy",
+ "Tx dropped",
+ "Tx bytes",
+};
+
+static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
+ "OQ Pkts Received",
+ "OQ Bytes Received",
+ "Dropped no dispatch",
+ "Dropped nomem",
+ "Dropped toomany",
+ "Stack RX cnt",
+ "Stack RX Bytes",
+ "RX dropped",
+};
+
+#define OCTNIC_NCMD_AUTONEG_ON 0x1
+#define OCTNIC_NCMD_PHY_ON 0x2
+
+static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+
+ linfo = &lio->linfo;
+
+ if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+ linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported =
+ (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+ SUPPORTED_Pause);
+ ecmd->advertising =
+ (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+ ecmd->transceiver = XCVR_EXTERNAL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ } else {
+ dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+ }
+
+ if (linfo->link.s.status) {
+ ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
+ ecmd->duplex = linfo->link.s.duplex;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static void
+lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct lio *lio;
+ struct octeon_device *oct;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(drvinfo->driver, "liquidio");
+ strcpy(drvinfo->version, LIQUIDIO_VERSION);
+ strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ ETHTOOL_FWVERS_LEN);
+ strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static void
+lio_ethtool_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf6x);
+ max_tx = CFG_GET_IQ_MAX_Q(conf6x);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ }
+
+ channel->max_rx = max_rx;
+ channel->max_tx = max_tx;
+ channel->rx_count = rx_count;
+ channel->tx_count = tx_count;
+}
+
+static int lio_get_eeprom_len(struct net_device *netdev)
+{
+ u8 buf[128];
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+ int len;
+
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return len;
+}
+
+static int
+lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+
+ if (eeprom->offset != 0)
+ return -EINVAL;
+
+ eeprom->magic = oct_dev->pci_dev->vendor;
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+	sprintf((char *)bytes,
+		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
+		board_info->name, board_info->serial_number,
+		board_info->major, board_info->minor);
+
+ return 0;
+}
+
+static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = addr;
+ nctrl.ncmd.s.param3 = val;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Callback invoked when an MDIO command response arrives. */
+static void octnet_mdio_resp_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd_context *mdio_cmd_ctx;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+
+ oct = lio_get_device(mdio_cmd_ctx->octeon_id);
+ if (status) {
+		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+ } else {
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+ }
+ wake_up_interruptible(&mdio_cmd_ctx->wc);
+}
+
+/* This routine provides PHY access for MDIO clause 45 devices. */
+static int
+octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd_context *mdio_cmd_ctx;
+ struct oct_mdio_cmd *mdio_cmd;
+ int retval = 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_mdio_cmd),
+ sizeof(struct oct_mdio_cmd_resp),
+ sizeof(struct oct_mdio_cmd_context));
+
+ if (!sc)
+ return -ENOMEM;
+
+ mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
+
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+ mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
+ mdio_cmd->op = op;
+ mdio_cmd->mdio_addr = loc;
+ if (op)
+ mdio_cmd->value1 = *value;
+ mdio_cmd->value2 = lio->linfo.ifidx;
+ octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
+ 0, 0, 0);
+
+ sc->wait_time = 1000;
+ sc->callback = octnet_mdio_resp_callback;
+ sc->callback_arg = sc;
+
+ init_waitqueue_head(&mdio_cmd_ctx->wc);
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "octnet_mdio45_access instruction failed status: %x\n",
+ retval);
+ retval = -EBUSY;
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived
+ */
+ sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+ retval = mdio_cmd_rsp->status;
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
+ retval = -EBUSY;
+ } else {
+ octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+ sizeof(struct oct_mdio_cmd) / 8);
+
+ if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+ if (!op)
+ *value = mdio_cmd_rsp->resp.value1;
+ } else {
+ retval = -EINVAL;
+ }
+ }
+ }
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return retval;
+}
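+
+/* Illustrative use: lio_set_phys_id() below saves and restores the LED
+ * registers through this helper; a read of the beacon register is
+ *
+ *	ret = octnet_mdio45_access(lio, 0, LIO68XX_LED_BEACON_ADDR, &val);
+ *
+ * where op == 0 selects a read and op != 0 a write ('val' is a local).
+ */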
+
+static int lio_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int value, ret;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEON);
+ return 2;
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Save the current LED settings */
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ /* Configure Beacon values */
+ value = LIO68XX_LED_BEACON_CFGON;
+ ret =
+ octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
+ if (ret)
+ return ret;
+
+ value = LIO68XX_LED_CTRL_CFGON;
+ ret =
+ octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_ON:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_HIGH);
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_OFF:
+ if (oct->chip_id == OCTEON_CN66XX)
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_LOW);
+ else if (oct->chip_id == OCTEON_CN68XX)
+ return -EINVAL;
+ else
+ return -EINVAL;
+
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEOFF);
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Restore LED settings */
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_BEACON_ADDR,
+						   &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+lio_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
+ rx_pending = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ }
+
+ if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+ ering->rx_pending = 0;
+ ering->rx_max_pending = 0;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = rx_pending;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = rx_max_pending;
+ } else {
+ ering->rx_pending = rx_pending;
+ ering->rx_max_pending = rx_max_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ }
+
+ ering->tx_pending = tx_pending;
+ ering->tx_max_pending = tx_max_pending;
+}
+
+static u32 lio_get_msglevel(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->msg_enable;
+}
+
+static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
+ if (msglvl & NETIF_MSG_HW)
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE);
+ else
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_DISABLE);
+ }
+
+ lio->msg_enable = msglvl;
+}
+
+static void
+lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+	/* Note: these drivers do not support autonegotiation, so just
+	 * report pause frame support.
+	 */
+ pause->tx_pause = 1;
+ pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */
+}
+
+static void
+lio_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i = 0, j;
+
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
+ if (!(oct_dev->io_qmask.iq & (1UL << j)))
+ continue;
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ data[i++] =
+ CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_processed);
+ data[i++] =
+ CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+ data[i++] =
+ readl(oct_dev->instr_queue[j]->inst_cnt_reg);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ }
+
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
+ if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ continue;
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+ }
+}
+
+static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int num_iq_stats, num_oq_stats, i, j;
+
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct_dev->io_qmask.iq & (1UL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct_dev->io_qmask.oq & (1UL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static int lio_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
+ (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+}
+
+static int lio_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ struct octeon_instr_queue *iq;
+ struct oct_intrmod_cfg *intrmod_cfg;
+
+ intrmod_cfg = &oct->intrmod;
+
+ switch (oct->chip_id) {
+ /* case OCTEON_CN73XX: Todo */
+ /* break; */
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ if (!intrmod_cfg->intrmod_enable) {
+ intr_coal->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+ intr_coal->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+ } else {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg->intrmod_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg->intrmod_check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg->intrmod_maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg->intrmod_minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg->intrmod_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg->intrmod_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg->intrmod_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg->intrmod_mincnt_trigger;
+ }
+
+ iq = oct->instr_queue[lio->linfo.txpciq[0]];
+ intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+ break;
+
+ default:
+		netif_info(lio, drv, lio->netdev, "Unknown chip\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+ u32 status,
+ void *ptr)
+{
+ struct oct_intrmod_cmd *cmd = ptr;
+ struct octeon_soft_command *sc = cmd->sc;
+
+ oct_dev = cmd->oct_dev;
+
+ if (status)
+ dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
+ CVM_CAST64(status));
+ else
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation enabled:%llx\n",
+ oct_dev->intrmod.intrmod_enable);
+
+ octeon_free_soft_command(oct_dev, sc);
+}
+
+/* Configure interrupt moderation parameters */
+static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cmd *cmd;
+ struct oct_intrmod_cfg *cfg;
+ int retval;
+ struct octeon_device *oct_dev = (struct octeon_device *)oct;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_intrmod_cfg),
+ 0,
+ sizeof(struct oct_intrmod_cmd));
+
+ if (!sc)
+ return -ENOMEM;
+
+ cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+ cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
+
+ memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
+ octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
+ cmd->sc = sc;
+ cmd->cfg = cfg;
+ cmd->oct_dev = oct_dev;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
+
+ sc->callback = octnet_intrmod_callback;
+ sc->callback_arg = cmd;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Enable/Disable auto interrupt Moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
+ *intr_coal, int adaptive)
+{
+ int ret = 0;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_intrmod_cfg *intrmod_cfg;
+
+ intrmod_cfg = &oct->intrmod;
+
+ if (adaptive) {
+ if (intr_coal->rate_sample_interval)
+ intrmod_cfg->intrmod_check_intrvl =
+ intr_coal->rate_sample_interval;
+ else
+ intrmod_cfg->intrmod_check_intrvl =
+ LIO_INTRMOD_CHECK_INTERVAL;
+
+ if (intr_coal->pkt_rate_high)
+ intrmod_cfg->intrmod_maxpkt_ratethr =
+ intr_coal->pkt_rate_high;
+ else
+ intrmod_cfg->intrmod_maxpkt_ratethr =
+ LIO_INTRMOD_MAXPKT_RATETHR;
+
+ if (intr_coal->pkt_rate_low)
+ intrmod_cfg->intrmod_minpkt_ratethr =
+ intr_coal->pkt_rate_low;
+ else
+ intrmod_cfg->intrmod_minpkt_ratethr =
+ LIO_INTRMOD_MINPKT_RATETHR;
+
+ if (intr_coal->rx_max_coalesced_frames_high)
+ intrmod_cfg->intrmod_maxcnt_trigger =
+ intr_coal->rx_max_coalesced_frames_high;
+ else
+ intrmod_cfg->intrmod_maxcnt_trigger =
+ LIO_INTRMOD_MAXCNT_TRIGGER;
+
+ if (intr_coal->rx_coalesce_usecs_high)
+ intrmod_cfg->intrmod_maxtmr_trigger =
+ intr_coal->rx_coalesce_usecs_high;
+ else
+ intrmod_cfg->intrmod_maxtmr_trigger =
+ LIO_INTRMOD_MAXTMR_TRIGGER;
+
+ if (intr_coal->rx_coalesce_usecs_low)
+ intrmod_cfg->intrmod_mintmr_trigger =
+ intr_coal->rx_coalesce_usecs_low;
+ else
+ intrmod_cfg->intrmod_mintmr_trigger =
+ LIO_INTRMOD_MINTMR_TRIGGER;
+
+ if (intr_coal->rx_max_coalesced_frames_low)
+ intrmod_cfg->intrmod_mincnt_trigger =
+ intr_coal->rx_max_coalesced_frames_low;
+ else
+ intrmod_cfg->intrmod_mincnt_trigger =
+ LIO_INTRMOD_MINCNT_TRIGGER;
+ }
+
+ intrmod_cfg->intrmod_enable = adaptive;
+ ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+
+ return ret;
+}
+
+static int
+oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+{
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u32 rx_max_coalesced_frames;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
+
+	/* Disable adaptive interrupt moderation */
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+ if (ret)
+ return ret;
+
+ /* Config Cnt based interrupt values */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ return 0;
+}
+
+static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
+ *intr_coal)
+{
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u32 time_threshold, rx_coalesce_usecs;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+	/* Disable adaptive interrupt moderation */
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+ if (ret)
+ return ret;
+
+ /* Config Time based interrupt values */
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+
+ return 0;
+}
+
+static int lio_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ u32 j, q_no;
+
+ if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
+ (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j];
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
+ CN6XXX_DB_MAX);
+ return -EINVAL;
+ }
+
+ /* User requested adaptive-rx on */
+ if (intr_coal->use_adaptive_rx_coalesce) {
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+	/* User requested adaptive-rx off and rx coalesce usecs */
+ if ((intr_coal->rx_coalesce_usecs) &&
+ (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+	/* User requested adaptive-rx off and rx coalesce frames */
+ if ((intr_coal->rx_max_coalesced_frames) &&
+ (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested adaptive-rx off, so use default coalesce params */
+ if ((!intr_coal->rx_max_coalesced_frames) &&
+ (!intr_coal->use_adaptive_rx_coalesce) &&
+ (!intr_coal->rx_coalesce_usecs)) {
+ dev_info(&oct->pci_dev->dev,
+ "Turning off adaptive-rx interrupt moderation\n");
+ dev_info(&oct->pci_dev->dev,
+ "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
+ CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
+ ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+
+ ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ return 0;
+ret_intrmod:
+ return ret;
+}
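+
+/* For reference, the paths above map to ethtool invocations such as
+ * (illustrative; 'eth0' is a placeholder interface name):
+ *
+ *	ethtool -C eth0 adaptive-rx on
+ *	ethtool -C eth0 adaptive-rx off rx-usecs 100 rx-frames 64
+ *	ethtool -C eth0 tx-frames 32
+ */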
+
+static int lio_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (lio->ptp_clock)
+ info->phc_index = ptp_clock_index(lio->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+
+static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ /* get the link info */
+ linfo = &lio->linfo;
+
+ if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
+ ecmd->speed != SPEED_10) ||
+ (ecmd->duplex != DUPLEX_HALF &&
+ ecmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+	/* Ethtool support is not provided for XAUI and RXAUI interfaces
+	 * as they operate at fixed speed and duplex settings.
+	 */
+ if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+ linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+ return -EINVAL;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+ nctrl.wait_time = 1000;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	/* Pass the parameters sent by ethtool (speed, autoneg and duplex)
+	 * to the SE core application using ncmd.s.more & ncmd.s.param.
+	 */
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ /* Autoneg ON */
+ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
+ OCTNIC_NCMD_AUTONEG_ON;
+ nctrl.ncmd.s.param2 = ecmd->advertising;
+ } else {
+ /* Autoneg OFF */
+ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
+
+ nctrl.ncmd.s.param3 = ecmd->duplex;
+
+ nctrl.ncmd.s.param2 = ecmd->speed;
+ }
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lio_nway_reset(struct net_device *netdev)
+{
+ if (netif_running(netdev)) {
+ struct ethtool_cmd ecmd;
+
+ memset(&ecmd, 0, sizeof(struct ethtool_cmd));
+ ecmd.autoneg = 0;
+ ecmd.speed = 0;
+ ecmd.duplex = 0;
+ lio_set_settings(netdev, &ecmd);
+ }
+ return 0;
+}
+
+/* Return register dump len. */
+static int lio_get_regs_len(struct net_device *dev)
+{
+ return OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ int i, len = 0;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+ reg = CN6XXX_WIN_WR_ADDR_LO;
+ len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
+ CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
+ CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_LO;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
+ CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
+ CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_LO;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
+ CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
+ CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
+ len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
+ CN6XXX_WIN_WR_MASK_REG,
+ octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
+
+ /* PCI Interrupt Register */
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
+ CN6XXX_SLI_INT_ENB64_PORT0));
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT1,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
+ len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
+
+ /* PCI Output queue registers */
+ for (i = 0; i < oct->num_oqs; i++) {
+ reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
+ len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+
+ /* PCI Input queue registers */
+ for (i = 0; i <= 3; i++) {
+ reg = CN6XXX_SLI_IQ_DOORBELL(i);
+ len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
+ len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+
+ /* PCI DMA registers */
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
+ CN6XXX_DMA_CNT(0),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(0),
+ octeon_read_csr(oct, reg));
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
+ CN6XXX_DMA_CNT(1),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+
+ /* PCI Index registers */
+
+ len += sprintf(s + len, "\n");
+
+ for (i = 0; i < 16; i++) {
+ reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
+ len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
+ CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
+ }
+
+ return len;
+}
+
+static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
+{
+ u32 val;
+ int i, len = 0;
+
+ /* PCI CONFIG Registers */
+
+ len += sprintf(s + len,
+ "\n\t Octeon Config space Registers\n\n");
+
+ for (i = 0; i <= 13; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ for (i = 30; i <= 34; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ return len;
+}
+
+/* Return a register dump to the user application. */
+static void lio_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct lio *lio = GET_LIO(dev);
+ int len = 0;
+ struct octeon_device *oct = lio->oct_dev;
+
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
+ regs->version = OCT_ETHTOOL_REGSVER;
+
+ switch (oct->chip_id) {
+ /* case OCTEON_CN73XX: Todo */
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ len += cn6xxx_read_csr_reg(regbuf + len, oct);
+ len += cn6xxx_read_config_reg(regbuf + len, oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
+ __func__, oct->chip_id);
+ }
+}
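+
+/* The dump assembled above is what userspace retrieves with
+ * 'ethtool -d <ifname>'; the buffer is sized by lio_get_regs_len().
+ */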
+
+static const struct ethtool_ops lio_ethtool_ops = {
+ .get_settings = lio_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = lio_get_drvinfo,
+ .get_ringparam = lio_ethtool_get_ringparam,
+ .get_channels = lio_ethtool_get_channels,
+ .set_phys_id = lio_set_phys_id,
+ .get_eeprom_len = lio_get_eeprom_len,
+ .get_eeprom = lio_get_eeprom,
+ .get_strings = lio_get_strings,
+ .get_ethtool_stats = lio_get_ethtool_stats,
+ .get_pauseparam = lio_get_pauseparam,
+ .get_regs_len = lio_get_regs_len,
+ .get_regs = lio_get_regs,
+ .get_msglevel = lio_get_msglevel,
+ .set_msglevel = lio_set_msglevel,
+ .get_sset_count = lio_get_sset_count,
+ .nway_reset = lio_nway_reset,
+ .set_settings = lio_set_settings,
+ .get_coalesce = lio_get_intr_coalesce,
+ .set_coalesce = lio_set_intr_coalesce,
+ .get_ts_info = lio_get_ts_info,
+};
+
+void liquidio_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &lio_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
new file mode 100644
index 000000000000..0660deecc2c9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -0,0 +1,3668 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LIQUIDIO_VERSION);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
+
+static int ddr_timeout = 10000;
+module_param(ddr_timeout, int, 0644);
+MODULE_PARM_DESC(ddr_timeout,
+ "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
+
+static u32 console_bitmask;
+module_param(console_bitmask, uint, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+static char fw_type[LIO_MAX_FW_TYPE_LEN];
+module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
+
+static int conf_type;
+module_param(conf_type, int, 0);
+MODULE_PARM_DESC(conf_type, "Select Octeon configuration: 0 = default, 1 = OVS");
+
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
+/* Polling interval for determining when NIC application is alive */
+#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
+
+/* runtime link query interval */
+#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
+
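+/* Context handed to a soft-command callback: the submitter sleeps on wc
+ * until the callback sets cond and wakes it up (see if_cfg_callback()).
+ */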
+struct liquidio_if_cfg_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
+struct liquidio_if_cfg_resp {
+ u64 rh;
+ struct liquidio_if_cfg_info cfg_info;
+ u64 status;
+};
+
+struct oct_link_status_resp {
+ u64 rh;
+ struct oct_link_info link_info;
+ u64 status;
+};
+
+struct oct_timestamp_resp {
+ u64 rh;
+ u64 timestamp;
+ u64 status;
+};
+
+#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
+
+union tx_info {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gso_size;
+ u16 gso_segs;
+ u32 reserved;
+#else
+ u32 reserved;
+ u16 gso_segs;
+ u16 gso_size;
+#endif
+ } s;
+};
+
+/** Octeon device properties to be used by the NIC module.
+ * Each octeon device in the system will be represented
+ * by this structure in the NIC module.
+ */
+
+#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
+
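+/* Largest GSO payload we advertise: the kernel's GSO_MAX_SIZE (64KB)
+ * minus room reserved for protocol headers.
+ */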
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+/** Structure of a node in list of gather components maintained by
+ * NIC driver for each network device.
+ */
+struct octnic_gather {
+ /** List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /** Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /** Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /** Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+};
+
+/** This structure is used by NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case: a 64-bit system.
+ */
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
+
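+/* Probe/init handshake state, one per device: "init" and "started" are
+ * completed as the device passes through its bring-up stages, with the
+ * *_ok flags reporting whether each stage succeeded.
+ */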
+struct handshake {
+ struct completion init;
+ struct completion started;
+ struct pci_dev *pci_dev;
+ int init_ok;
+ int started_ok;
+};
+
+struct octeon_device_priv {
+ /** Tasklet structures for this device. */
+ struct tasklet_struct droq_tasklet;
+ unsigned long napi_mask;
+};
+
+static int octeon_device_init(struct octeon_device *);
+static void liquidio_remove(struct pci_dev *pdev);
+static int liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static struct handshake handshake[MAX_OCTEON_DEVICES];
+static struct completion first_stage;
+
+static void octeon_droq_bh(unsigned long pdev)
+{
+ int q_no;
+ int reschedule = 0;
+ struct octeon_device *oct = (struct octeon_device *)pdev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
+ if (!(oct->io_qmask.oq & (1UL << q_no)))
+ continue;
+ reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
+ MAX_PACKET_BUDGET);
+ }
+
+ if (reschedule)
+ tasklet_schedule(&oct_priv->droq_tasklet);
+}
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ int retry = 100, pkt_cnt = 0, pending_pkts = 0;
+ int i;
+
+ do {
+ pending_pkts = 0;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
+ oct->droq[i]);
+ }
+ if (pkt_cnt > 0) {
+ pending_pkts += pkt_cnt;
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ pkt_cnt = 0;
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pending_pkts;
+}
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+ *bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+}
+
+int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
+
+/**
+ * \brief Forces all IO queues off on a given device
+ * @param oct Pointer to Octeon device
+ */
+static void force_io_queues_off(struct octeon_device *oct)
+{
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) {
+ /* Reset the Enable bits for Input Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+
+ /* Reset the Enable bits for Output Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static int wait_for_pending_requests(struct octeon_device *oct)
+{
+ int i, pcount = 0;
+
+ for (i = 0; i < 100; i++) {
+ pcount =
+ atomic_read(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count);
+ if (pcount)
+ schedule_timeout_uninterruptible(HZ / 10);
+ else
+ break;
+ }
+
+ if (pcount)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * \brief Cause device to go quiet so it can be safely removed/reset/etc
+ * @param oct Pointer to Octeon device
+ */
+static inline void pcierror_quiesce_device(struct octeon_device *oct)
+{
+ int i;
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet processing
+ * to finish.
+ */
+ force_io_queues_off(oct);
+
+ /* To allow for in-flight requests */
+ schedule_timeout_uninterruptible(100);
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to complete. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ /* Force all pending ordered list requests to time out. */
+ lio_process_ordered_list(oct, 1);
+
+ /* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * \brief Cleanup PCI AER uncorrectable error status
+ * @param dev Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
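+ /* Assumes the AER capability is the first PCIe extended capability,
+ * at config space offset 0x100.
+ */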
+ int pos = 0x100;
+ u32 status, mask;
+
+ pr_info("%s :\n", __func__);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
+/**
+ * \brief Stop all PCI IO to a given device
+ * @param dev Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ pci_disable_device(oct->pci_dev);
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ pcierror_quiesce_device(oct);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ /* Clean up AER uncorrectable error status; common to all OCTEON models */
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+}
+
+/**
+ * \brief called when PCI error is detected
+ * @param pdev Pointer to PCI device
+ * @param state The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct octeon_device *oct = pci_get_drvdata(pdev);
+
+ /* Non-correctable Non-fatal errors */
+ if (state == pci_channel_io_normal) {
+ dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
+ /* Non-correctable Fatal errors */
+ dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+ stop_pci_io(oct);
+
+ /* Always return a DISCONNECT. There is no support for recovery but only
+ * for a clean shutdown.
+ */
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * \brief mmio handler
+ * @param pdev Pointer to PCI device
+ */
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in io_error above.
+ * But play safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called after the pci bus has been reset.
+ * @param pdev Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the octeon_resume routine.
+ */
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in io_error above.
+ * But play safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called when traffic can start flowing again.
+ * @param pdev Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the octeon_resume routine.
+ */
+static void liquidio_pcie_resume(struct pci_dev *pdev)
+{
+ /* Nothing to be done here. */
+}
+
+#ifdef CONFIG_PM
+/**
+ * \brief called when suspending
+ * @param pdev Pointer to PCI device
+ * @param state state to suspend to
+ */
+static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+/**
+ * \brief called when resuming
+ * @param pdev Pointer to PCI device
+ */
+static int liquidio_resume(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static struct pci_error_handlers liquidio_err_handler = {
+ .error_detected = liquidio_pcie_error_detected,
+ .mmio_enabled = liquidio_pcie_mmio_enabled,
+ .slot_reset = liquidio_pcie_slot_reset,
+ .resume = liquidio_pcie_resume,
+};
+
+static const struct pci_device_id liquidio_pci_tbl[] = {
+ { /* 68xx */
+ PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ { /* 66xx */
+ PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0
+ }
+};
+MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
+
+static struct pci_driver liquidio_pci_driver = {
+ .name = "LiquidIO",
+ .id_table = liquidio_pci_tbl,
+ .probe = liquidio_probe,
+ .remove = liquidio_remove,
+ .err_handler = &liquidio_err_handler, /* For AER */
+
+#ifdef CONFIG_PM
+ .suspend = liquidio_suspend,
+ .resume = liquidio_resume,
+#endif
+
+};
+
+/**
+ * \brief register PCI driver
+ */
+static int liquidio_init_pci(void)
+{
+ return pci_register_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief unregister PCI driver
+ */
+static void liquidio_deinit_pci(void)
+{
+ pci_unregister_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
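+
+/* Note: ifstate_set()/ifstate_reset() do a read-modify-write on an
+ * atomic_t; the sequence as a whole is not atomic, so callers are
+ * assumed to serialize ifstate updates.
+ */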
+
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_stop(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+ } else {
+ netif_stop_queue(netdev);
+ }
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_start(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+ } else {
+ netif_start_queue(netdev);
+ }
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_wake(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+ } else {
+ netif_wake_queue(netdev);
+ }
+}
+
+/**
+ * \brief Stop Tx queue
+ * @param netdev network device
+ */
+static void stop_txq(struct net_device *netdev)
+{
+ txqs_stop(netdev);
+}
+
+/**
+ * \brief Start Tx queue
+ * @param netdev network device
+ */
+static void start_txq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->linfo.link.s.status) {
+ txqs_start(netdev);
+ return;
+ }
+}
+
+/**
+ * \brief Wake a queue
+ * @param netdev network device
+ * @param q which queue to wake
+ */
+static inline void wake_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_wake_subqueue(netdev, q);
+ else
+ netif_wake_queue(netdev);
+}
+
+/**
+ * \brief Stop a queue
+ * @param netdev network device
+ * @param q which queue to stop
+ */
+static inline void stop_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_stop_subqueue(netdev, q);
+ else
+ netif_stop_queue(netdev);
+}
+
+/**
+ * \brief Check Tx queue status, and take appropriate action
+ * @param lio per-network private data
+ * @returns 0 if full, number of queues woken up otherwise
+ */
+static inline int check_txq_status(struct lio *lio)
+{
+ int ret_val = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ int numqs = lio->netdev->num_tx_queues;
+ int q, iq = 0;
+
+ /* check each sub-queue state */
+ for (q = 0; q < numqs; q++) {
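+ /* num_txpciq is assumed to be a power of two; masking with
+ * (num_txpciq - 1) wraps q onto a valid tx queue index.
+ */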
+ iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ continue;
+ wake_q(lio->netdev, q);
+ ret_val++;
+ }
+ } else {
+ if (octnet_iq_is_full(lio->oct_dev, lio->txq))
+ return 0;
+ wake_q(lio->netdev, lio->txq);
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+/**
+ * Remove and return the node at the head of the list. Returns NULL if
+ * the list is empty.
+ */
+static inline struct list_head *list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
+ if ((root->prev == root) && (root->next == root))
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
+}
+
+/**
+ * \brief Delete gather list
+ * @param lio per-network private data
+ */
+static void delete_glist(struct lio *lio)
+{
+ struct octnic_gather *g;
+
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist);
+ if (g) {
+ if (g->sg)
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ }
+ } while (g);
+}
+
+/**
+ * \brief Setup gather list
+ * @param lio per-network private data
+ */
+static int setup_glist(struct lio *lio)
+{
+ int i;
+ struct octnic_gather *g;
+
+ INIT_LIST_HEAD(&lio->glist);
+
+ for (i = 0; i < lio->tx_qsize; i++) {
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg_size =
+ ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit boundary */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
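+ /* delete_glist() undoes this by subtracting g->adjust before
+ * kfree(), so the originally allocated pointer is freed.
+ */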
+ list_add_tail(&g->list, &lio->glist);
+ }
+
+ if (i == lio->tx_qsize)
+ return 0;
+
+ delete_glist(lio);
+ return 1;
+}
+
+/**
+ * \brief Print link information
+ * @param netdev network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+ struct oct_link_info *linfo = &lio->linfo;
+
+ if (linfo->link.s.status) {
+ netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+ linfo->link.s.speed,
+ (linfo->link.s.duplex) ? "Full" : "Half");
+ } else {
+ netif_info(lio, link, lio->netdev, "Link Down\n");
+ }
+ }
+}
+
+/**
+ * \brief Update link status
+ * @param netdev network device
+ * @param ls link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static inline void update_link_status(struct net_device *netdev,
+ union oct_link_status *ls)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
+ lio->linfo.link.u64 = ls->u64;
+
+ print_link_info(netdev);
+
+ if (lio->linfo.link.s.status) {
+ netif_carrier_on(netdev);
+ txqs_wake(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ stop_txq(netdev);
+ }
+ }
+}
+
+/**
+ * \brief DROQ packet processor scheduler
+ * @param oct octeon device
+ */
+static
+void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ u64 oq_no;
+ struct octeon_droq *droq;
+
+ if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+ if (!(oct->droq_intr & (1 << oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ oct_priv->napi_mask |= (1 << oq_no);
+ } else {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ }
+ }
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+static
+irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ irqreturn_t ret;
+
+ /* Disable our interrupts for the duration of ISR */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ ret = oct->fn_list.process_interrupt_regs(oct);
+
+ if (ret == IRQ_HANDLED)
+ liquidio_schedule_droq_pkt_handlers(oct);
+
+ /* Re-enable our interrupts */
+ if (atomic_read(&oct->status) != OCT_DEV_IN_RESET)
+ oct->fn_list.enable_interrupt(oct->chip);
+
+ return ret;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+ int irqret, err;
+
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
+ IRQF_SHARED, "octeon", oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octeon_device *oct_dev = NULL;
+ struct handshake *hs;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = (void *)pdev;
+
+ hs = &handshake[oct_dev->octeon_id];
+ init_completion(&hs->init);
+ init_completion(&hs->started);
+ hs->pci_dev = pdev;
+
+ if (oct_dev->octeon_id == 0)
+ /* first LiquidIO NIC is detected */
+ complete(&first_stage);
+
+ if (octeon_device_init(oct_dev)) {
+ liquidio_remove(pdev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+/**
+ * \brief Destroy resources associated with octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ int i;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ struct handshake *hs;
+
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ /* fallthrough */
+ case OCT_DEV_HOST_OK:
+
+ /* fallthrough */
+ case OCT_DEV_CONSOLE_INIT_DONE:
+ /* Remove any consoles */
+ octeon_remove_consoles(oct);
+
+ /* fallthrough */
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
+ */
+ oct->fn_list.disable_io_queues(oct);
+
+ if (lio_wait_for_oq_pkts(oct))
+ dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ /* Soft reset the octeon device before exiting */
+ oct->fn_list.soft_reset(oct);
+
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
+ /* fallthrough */
+ case OCT_DEV_IN_RESET:
+ case OCT_DEV_DROQ_INIT_DONE:
+ mdelay(100);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ /* Force any pending handshakes to complete */
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+
+ if (hs->pci_dev) {
+ handshake[oct->octeon_id].init_ok = 0;
+ complete(&handshake[oct->octeon_id].init);
+ handshake[oct->octeon_id].started_ok = 0;
+ complete(&handshake[oct->octeon_id].started);
+ }
+ }
+
+ /* fallthrough */
+ case OCT_DEV_RESP_LIST_INIT_DONE:
+ octeon_delete_response_list(oct);
+
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
+
+ /* fallthrough */
+ case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ /* fallthrough */
+ case OCT_DEV_DISPATCH_INIT_DONE:
+ octeon_delete_dispatch_list(oct);
+ cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+ /* fallthrough */
+ case OCT_DEV_PCI_MAP_DONE:
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+
+ /* fallthrough */
+ case OCT_DEV_BEGIN_STATE:
+ /* Nothing to be done here either */
+ break;
+ } /* end switch(oct->status) */
+
+ tasklet_kill(&oct_priv->droq_tasklet);
+}
+
+/**
+ * \brief Send Rx control command
+ * @param lio per-network private data
+ * @param start_stop whether to start or stop
+ */
+static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = start_stop;
+ nctrl.netpndev = (u64)lio->netdev;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+}
+
+/**
+ * \brief Destroy NIC device interface
+ * @param oct octeon device
+ * @param ifidx which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+ struct net_device *netdev = oct->props[ifidx].netdev;
+ struct lio *lio;
+
+ if (!netdev) {
+ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+ __func__, ifidx);
+ return;
+ }
+
+ lio = GET_LIO(netdev);
+
+ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+ send_rx_ctrl_cmd(lio, 0);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+ txqs_stop(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+ unregister_netdev(netdev);
+
+ delete_glist(lio);
+
+ free_netdev(netdev);
+
+ oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * \brief Stop complete NIC functionality
+ * @param oct octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+ int i, j;
+ struct lio *lio;
+
+ dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ if (!oct->ifcount) {
+ dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+ return 1;
+ }
+
+ for (i = 0; i < oct->ifcount; i++) {
+ lio = GET_LIO(oct->props[i].netdev);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++)
+ octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+ }
+
+ for (i = 0; i < oct->ifcount; i++)
+ liquidio_destroy_nic_device(oct, i);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+ return 0;
+}
+
+/**
+ * \brief Cleans up resources at unload time
+ * @param pdev PCI device structure
+ */
+static void liquidio_remove(struct pci_dev *pdev)
+{
+ struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+ if (oct_dev->app_mode == CVM_DRV_NIC_APP)
+ liquidio_stop_nic_module(oct_dev);
+
+ /* Reset the octeon device and cleanup all memory allocated for
+ * the octeon device by driver.
+ */
+ octeon_destroy_resources(oct_dev);
+
+ dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+ /* This octeon device has been removed. Update the global
+ * data structure to reflect this. Free the device structure.
+ */
+ octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * \brief Identify the Octeon device and map the BAR address space
+ * @param oct octeon device
+ */
+static int octeon_chip_specific_setup(struct octeon_device *oct)
+{
+ u32 dev_id, rev_id;
+ int ret = 1;
+
+ pci_read_config_dword(oct->pci_dev, 0, &dev_id);
+ pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+ oct->rev_id = rev_id & 0xff;
+
+ switch (dev_id) {
+ case OCTEON_CN68XX_PCIID:
+ oct->chip_id = OCTEON_CN68XX;
+ ret = lio_setup_cn68xx_octeon_device(oct);
+ break;
+
+ case OCTEON_CN66XX_PCIID:
+ oct->chip_id = OCTEON_CN66XX;
+ ret = lio_setup_cn66xx_octeon_device(oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
+ dev_id);
+ }
+
+ if (!ret)
+ dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+ OCTEON_MAJOR_REV(oct),
+ OCTEON_MINOR_REV(oct),
+ octeon_get_conf(oct)->card_name);
+
+ return ret;
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+ /* setup PCI stuff first */
+ if (pci_enable_device(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+ return 1;
+ }
+
+ if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+ dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ return 1;
+ }
+
+ /* Enable PCI DMA Master. */
+ pci_set_master(oct->pci_dev);
+
+ return 0;
+}
+
+/**
+ * \brief Check Tx queue state for a given network buffer
+ * @param lio per-network private data
+ * @param skb network buffer
+ */
+static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0, iq = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ q = skb->queue_mapping;
+ iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+ } else {
+ iq = lio->txq;
+ }
+
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ return 0;
+ wake_q(lio->netdev, q);
+ return 1;
+}
+
+/**
+ * \brief Unmap and free network buffer
+ * @param buf buffer
+ */
+static void free_netbuf(void *buf)
+{
+ struct sk_buff *skb;
+ struct octnet_buf_free_info *finfo;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+ DMA_TO_DEVICE);
+
+ check_txq_state(lio, skb);
+
+ recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer
+ * @param buf buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ spin_lock(&lio->lock);
+ list_add_tail(&g->list, &lio->glist);
+ spin_unlock(&lio->lock);
+
+ check_txq_state(lio, skb); /* mq support: sub-queue state check */
+
+ recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer with response
+ * @param buf buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+ struct octeon_soft_command *sc;
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags;
+
+ sc = (struct octeon_soft_command *)buf;
+ skb = (struct sk_buff *)sc->callback_arg;
+ finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ spin_lock(&lio->lock);
+ list_add_tail(&g->list, &lio->glist);
+ spin_unlock(&lio->lock);
+
+ /* Don't free the skb yet */
+
+ check_txq_state(lio, skb);
+}
+
+/**
+ * \brief Adjust ptp frequency
+ * @param ptp PTP clock info
+ * @param ppb how much to adjust by, in parts-per-billion
+ */
+static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ u64 comp, delta;
+ unsigned long flags;
+ bool neg_adj = false;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+
+ /* The hardware adds the clock compensation value to the
+ * PTP clock on every coprocessor clock cycle, so we
+ * compute the delta in terms of coprocessor clocks.
+ */
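+ /* Illustrative example (assuming a 600 MHz coprocessor clock):
+ * ppb = 1000 gives delta = (1000 << 32) / 600000000 ~= 7158, i.e.
+ * about 1.67e-6 ns of extra compensation per cycle, which over
+ * 6e8 cycles adds up to the requested 1000 ns/s adjustment.
+ */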
+ delta = (u64)ppb << 32;
+ do_div(delta, oct->coproc_clock_rate);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
+ if (neg_adj)
+ comp -= delta;
+ else
+ comp += delta;
+ lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Adjust ptp time
+ * @param ptp PTP clock info
+ * @param delta how much to adjust by, in nanosecs
+ */
+static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio->ptp_adjust += delta;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Get hardware clock time, including any adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
+ ns += lio->ptp_adjust;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * \brief Set hardware clock time. Reset adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ ns = timespec_to_ns(ts);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
+ lio->ptp_adjust = 0;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Enable or disable a PTP ancillary feature (not supported)
+ * @param ptp PTP clock info
+ * @param rq request
+ * @param on whether to enable or disable
+ */
+static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * \brief Open PTP clock source
+ * @param netdev network device
+ */
+static void oct_ptp_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_init(&lio->ptp_lock);
+
+ snprintf(lio->ptp_info.name, sizeof(lio->ptp_info.name), "%s",
+ netdev->name);
+ lio->ptp_info.owner = THIS_MODULE;
+ lio->ptp_info.max_adj = 250000000;
+ lio->ptp_info.n_alarm = 0;
+ lio->ptp_info.n_ext_ts = 0;
+ lio->ptp_info.n_per_out = 0;
+ lio->ptp_info.pps = 0;
+ lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjtime = liquidio_ptp_adjtime;
+ lio->ptp_info.gettime64 = liquidio_ptp_gettime;
+ lio->ptp_info.settime64 = liquidio_ptp_settime;
+ lio->ptp_info.enable = liquidio_ptp_enable;
+
+ lio->ptp_adjust = 0;
+
+ lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
+ &oct->pci_dev->dev);
+
+ if (IS_ERR(lio->ptp_clock))
+ lio->ptp_clock = NULL;
+}
+
+/**
+ * \brief Init PTP clock
+ * @param oct octeon device
+ */
+static void liquidio_ptp_init(struct octeon_device *oct)
+{
+ u64 clock_comp, cfg;
+
+ clock_comp = (u64)NSEC_PER_SEC << 32;
+ do_div(clock_comp, oct->coproc_clock_rate);
+ lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+
+ /* Enable */
+ cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
+ lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
+}
+
+/**
+ * \brief Load firmware to device
+ * @param oct octeon device
+ *
+ * Maps device to firmware filename, requests firmware, and downloads it
+ */
+static int load_firmware(struct octeon_device *oct)
+{
+ int ret = 0;
+ const struct firmware *fw;
+ char fw_name[LIO_MAX_FW_FILENAME_LEN];
+ char *tmp_fw_type;
+
+ if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
+ sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+ dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
+ return ret;
+ }
+
+ if (fw_type[0] == '\0')
+ tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
+ else
+ tmp_fw_type = fw_type;
+
+ snprintf(fw_name, sizeof(fw_name), "%s%s%s_%s%s", LIO_FW_DIR,
+ LIO_FW_BASE_NAME, octeon_get_conf(oct)->card_name, tmp_fw_type,
+ LIO_FW_NAME_SUFFIX);
+
+ ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
+ fw_name);
+ return ret;
+ }
+
+ ret = octeon_download_firmware(oct, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+ int desc_size, void *app_ctx)
+{
+ int ret_val = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+ /* droq creation and local register settings. */
+ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (ret_val == -1)
+ return ret_val;
+
+ if (ret_val == 1) {
+ dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+ return 0;
+ }
+ /* tasklet creation for the droq */
+
+ /* Enable the droq queues */
+ octeon_set_droq_pkt_op(oct, q_no, 1);
+
+ /* Send Credit for Octeon Output queues. Credits are always
+ * sent after the output queue is enabled.
+ */
+ writel(oct->droq[q_no]->max_count,
+ oct->droq[q_no]->pkts_credit_reg);
+
+ return ret_val;
+}
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to the soft command carrying the response
+ */
+static void if_cfg_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_if_cfg_resp *resp;
+ struct liquidio_if_cfg_context *ctx;
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (resp->status)
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+ CVM_CAST64(resp->status));
+ ACCESS_ONCE(ctx->cond) = 1;
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Select queue based on hash
+ * @param dev Net device
+ * @param skb sk_buff structure
+ * @returns selected queue number
+ */
+static u16 select_q(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ int qindex;
+ struct lio *lio;
+
+ lio = GET_LIO(dev);
+ /* select queue based on the recorded rx queue, else the current core */
+ qindex = skb_rx_queue_recorded(skb) ?
+ skb_get_rx_queue(skb) : smp_processor_id();
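+ /* num_txpciq is assumed to be a power of two, so the mask below is qindex % num_txpciq */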
+ return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+}
+
+/** Routine to push packets arriving on Octeon interface up to the network layer.
+ * @param octeon_id - octeon device id.
+ * @param skbuff - skbuff struct to be passed to network layer.
+ * @param len - size of total data received.
+ * @param rh - Control header associated with the packet
+ * @param param - additional control data with the packet
+ */
+static void
+liquidio_push_packet(u32 octeon_id,
+ void *skbuff,
+ u32 len,
+ union octeon_rh *rh,
+ void *param)
+{
+ struct napi_struct *napi = param;
+ struct octeon_device *oct = lio_get_device(octeon_id);
+ struct sk_buff *skb = (struct sk_buff *)skbuff;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 ns;
+ struct net_device *netdev =
+ (struct net_device *)oct->props[rh->r_dh.link].netdev;
+ struct octeon_droq *droq = container_of(param, struct octeon_droq,
+ napi);
+ if (netdev) {
+ int packet_was_received;
+ struct lio *lio = GET_LIO(netdev);
+
+ /* Do not proceed if the interface is not in RUNNING state. */
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ recv_buffer_free(skb);
+ droq->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = netdev;
+
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at the
+ * beginning of the packet.
+ */
+ if (ifstate_check(lio,
+ LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data), sizeof(ns));
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns + lio->ptp_adjust);
+ }
+ skb_pull(skb, sizeof(ns));
+ }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+ /* checksum has already been verified */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
+
+ if (packet_was_received) {
+ droq->stats.rx_bytes_received += len;
+ droq->stats.rx_pkts_received++;
+ netdev->last_rx = jiffies;
+ } else {
+ droq->stats.rx_dropped++;
+ netif_info(lio, rx_err, lio->netdev,
+ "droq:%d error rx_dropped:%llu\n",
+ droq->q_no, droq->stats.rx_dropped);
+ }
+
+ } else {
+ recv_buffer_free(skb);
+ }
+}
+
+/**
+ * \brief wrapper for calling napi_schedule
+ * @param param parameters to pass to napi_schedule
+ *
+ * Used when scheduling on different CPUs
+ */
+static void napi_schedule_wrapper(void *param)
+{
+ struct napi_struct *napi = param;
+
+ napi_schedule(napi);
+}
+
+/**
+ * \brief callback when receive interrupt occurs and we are in NAPI mode
+ * @param arg pointer to octeon output queue
+ */
+static void liquidio_napi_drv_callback(void *arg)
+{
+ struct octeon_droq *droq = arg;
+ int this_cpu = smp_processor_id();
+
+ if (droq->cpu_id == this_cpu) {
+ napi_schedule(&droq->napi);
+ } else {
+ struct call_single_data *csd = &droq->csd;
+
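+ /* This droq is polled on another CPU: bounce napi_schedule()
+ * there with an async smp call instead of scheduling it locally.
+ */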
+ csd->func = napi_schedule_wrapper;
+ csd->info = &droq->napi;
+ csd->flags = 0;
+
+ smp_call_function_single_async(droq->cpu_id, csd);
+ }
+}
+
+/**
+ * \brief Main NAPI poll function
+ * @param droq octeon output queue
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
+{
+ int work_done;
+ struct lio *lio = GET_LIO(droq->napi.dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
+ if (work_done < 0) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Receive work_done < 0, rxq:%d\n", droq->q_no);
+ goto octnet_napi_finish;
+ }
+
+ if (work_done > budget)
+ dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
+ __func__, work_done, budget);
+
+ return work_done;
+
+octnet_napi_finish:
+ napi_complete(&droq->napi);
+ octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
+ 0);
+ return 0;
+}
+
+/**
+ * \brief Entry point for NAPI polling
+ * @param napi NAPI structure
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_droq *droq;
+ int work_done;
+
+ droq = container_of(napi, struct octeon_droq, napi);
+
+ work_done = liquidio_napi_do_rx(droq, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+ POLL_EVENT_ENABLE_INTR, 0);
+ return 0;
+ }
+
+ return work_done;
+}
+
+/**
+ * \brief Setup input and output queues
+ * @param octeon_dev octeon device
+ * @param net_device Net device
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+static inline int setup_io_queues(struct octeon_device *octeon_dev,
+ struct net_device *net_device)
+{
+ static int first_time = 1;
+ static struct octeon_droq_ops droq_ops;
+ static int cpu_id;
+ static int cpu_id_modulus;
+ struct octeon_droq *droq;
+ struct napi_struct *napi;
+ int q, q_no, retval = 0;
+ struct lio *lio;
+ int num_tx_descs;
+
+ lio = GET_LIO(net_device);
+ if (first_time) {
+ first_time = 0;
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
+ }
+
+ /* set up DROQs. */
+ for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+ q_no = lio->linfo.rxpciq[q];
+
+ retval = octeon_setup_droq(octeon_dev, q_no,
+ CFG_GET_NUM_RX_DESCS_NIC_IF
+ (octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
+ (octeon_get_conf(octeon_dev),
+ lio->ifidx), NULL);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime DROQ(RxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ droq = octeon_dev->droq[q_no];
+ napi = &droq->napi;
+ netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+
+ /* designate a CPU for this droq */
+ droq->cpu_id = cpu_id;
+ cpu_id++;
+ if (cpu_id >= cpu_id_modulus)
+ cpu_id = 0;
+
+ octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+ }
+
+ /* set up IQs. */
+ for (q = 0; q < lio->linfo.num_txpciq; q++) {
+ num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
+ (octeon_dev),
+ lio->ifidx);
+ retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
+ num_tx_descs,
+ netdev_get_tx_queue(net_device, q));
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime IQ(TxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Poll routine for checking transmit queue status
+ * @param work work_struct data structure
+ */
+static void octnet_poll_check_txq_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
+ return;
+
+ check_txq_status(lio);
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Sets up the txq poll check
+ * @param netdev network device
+ */
+static inline void setup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->txq_status_wq.wq = create_workqueue("txq-status");
+ if (!lio->txq_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
+ return;
+ }
+ INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
+ octnet_poll_check_txq_status);
+ lio->txq_status_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Net device open for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct_ptp_open(netdev);
+
+ ifstate_set(lio, LIO_IFSTATE_RUNNING);
+ setup_tx_poll_fn(netdev);
+ start_txq(netdev);
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ try_module_get(THIS_MODULE);
+
+ /* tell Octeon to start forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 1);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
+ netdev->name);
+
+ return 0;
+}
+
+/**
+ * \brief Net device stop for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+ struct napi_struct *napi, *n;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ /* Inform that netif carrier is down */
+ lio->intf_open = 0;
+ lio->linfo.link.s.status = 0;
+
+ netif_carrier_off(netdev);
+
+ /* tell Octeon to stop forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 0);
+
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ flush_workqueue(lio->txq_status_wq.wq);
+ destroy_workqueue(lio->txq_status_wq.wq);
+
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ /* This is a hack that allows DHCP to continue working. */
+ set_bit(__LINK_STATE_START, &lio->netdev->state);
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ txqs_stop(netdev);
+
+ dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ /* If command is successful, change the MACADDR. */
+ netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
+ CVM_CAST64(nctrl->udd[0]));
+ dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
+ netdev->name, CVM_CAST64(nctrl->udd[0]));
+ memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+ break;
+
+ case OCTNET_CMD_CHANGE_MTU:
+ /* If command is successful, change the MTU. */
+ netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
+ netdev->mtu, nctrl->ncmd.s.param2);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu,
+ nctrl->ncmd.s.param2);
+ netdev->mtu = nctrl->ncmd.s.param2;
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s Verbose mode enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s Verbose mode disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
+
+/**
+ * \brief Converts a mask based on net device flags
+ * @param netdev network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ f |= OCTNET_IFFLAG_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+
+ if (netdev->flags & IFF_MULTICAST) {
+ f |= OCTNET_IFFLAG_MULTICAST;
+
+ /* Accept all multicast addresses if there are more than we
+ * can handle
+ */
+ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ f |= OCTNET_IFFLAG_BROADCAST;
+
+ return f;
+}
+
+/**
+ * \brief Net device set_multicast_list
+ * @param netdev network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ struct netdev_hw_addr *ha;
+ u64 *mc;
+ int ret;
+ int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = get_new_flags(netdev);
+ nctrl.ncmd.s.param3 = mc_count;
+ nctrl.ncmd.s.more = mc_count;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ mc = &nctrl.udd[0];
+ netdev_for_each_mc_addr(ha, netdev) {
+ *mc = 0;
+ memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
+ /* no need to swap bytes */
+
+ if (++mc >= &nctrl.udd[mc_count])
+ break;
+ }
+
+ /* This callback can be invoked in atomic context, so we must not
+ * wait for the response.
+ */
+ nctrl.wait_time = 0;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+}
+
+/**
+ * \brief Net device set_mac_address
+ * @param netdev network device
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+ int ret = 0;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+
+ if ((!is_valid_ether_addr(addr->sa_data)) ||
+ (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+ return -EADDRNOTAVAIL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = 0;
+ nctrl.ncmd.s.more = 1;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ nctrl.wait_time = 100;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+ return -ENOMEM;
+ }
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
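+
+/* Layout note (descriptive only): both the multicast list and the MAC
+ * address command copy each 6-byte Ethernet address into bytes 2..7 of a
+ * 64-bit udd word, leaving bytes 0..1 zero, which is why the memcpy
+ * destinations above are offset by 2.
+ */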
+
+/**
+ * \brief Net device get_stats
+ * @param netdev network device
+ */
+static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ struct octeon_device *oct;
+ u64 pkts = 0, drop = 0, bytes = 0;
+ struct oct_droq_stats *oq_stats;
+ struct oct_iq_stats *iq_stats;
+ int i, iq_no, oq_no;
+
+ oct = lio->oct_dev;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ iq_no = lio->linfo.txpciq[i];
+ iq_stats = &oct->instr_queue[iq_no]->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+ oq_no = lio->linfo.rxpciq[i];
+ oq_stats = &oct->droq[oq_no]->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_nodispatch +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+
+ stats->rx_bytes = bytes;
+ stats->rx_packets = pkts;
+ stats->rx_dropped = drop;
+
+ return stats;
+}
+
+/**
+ * \brief Net device change_mtu
+ * @param netdev network device
+ */
+static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
+ int ret = 0;
+
+ /* Limit the MTU so that the resulting frame size stays between
+ * OCTNET_MIN_FRM_SIZE and OCTNET_MAX_FRM_SIZE bytes
+ */
+ if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
+ (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
+ dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
+ (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
+ (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+ return -EINVAL;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = new_mtu;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
+ return -EIO;
+ }
+
+ lio->mtu = new_mtu;
+
+ return 0;
+}
+
+/**
+ * \brief Handler for SIOCSHWTSTAMP ioctl
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config conf;
+ struct lio *lio = GET_LIO(netdev);
+
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+ return -EFAULT;
+
+ if (conf.flags)
+ return -EINVAL;
+
+ switch (conf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (conf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ else
+ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
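+
+/* For reference, a user-space caller would exercise the handler above
+ * roughly as follows (sketch only; error handling omitted and "eth0" is a
+ * placeholder interface name):
+ *
+ * struct hwtstamp_config cfg = { 0 };
+ * struct ifreq ifr = { 0 };
+ * int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ * cfg.tx_type = HWTSTAMP_TX_ON;
+ * cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ * strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ * ifr.ifr_data = (void *)&cfg;
+ * ioctl(fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * On return, cfg.rx_filter reads back as HWTSTAMP_FILTER_ALL, since the
+ * handler coerces every supported PTP filter to that value.
+ */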
+
+/**
+ * \brief ioctl handler
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * \brief handle a Tx timestamp response
+ * @param status response status
+ * @param buf pointer to skb
+ */
+static void handle_timestamp(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octeon_soft_command *sc;
+ struct oct_timestamp_resp *resp;
+ struct lio *lio;
+ struct sk_buff *skb = (struct sk_buff *)buf;
+
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ lio = finfo->lio;
+ sc = finfo->sc;
+ oct = lio->oct_dev;
+ resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_DONE) {
+ dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ resp->timestamp = 0;
+ }
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
+ netif_info(lio, tx_done, lio->netdev,
+ "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+ skb, (unsigned long long)ns);
+ ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ octeon_free_soft_command(oct, sc);
+ recv_buffer_free(skb);
+}
+
+/** \brief Send a data packet that will be timestamped
+ * @param oct octeon device
+ * @param ndata pointer to network data
+ * @param finfo pointer to private network data
+ */
+static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
+{
+ int retval;
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_rdp *rdp;
+ struct lio *lio;
+ int ring_doorbell;
+
+ lio = finfo->lio;
+
+ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+ sizeof(struct oct_timestamp_resp));
+ finfo->sc = sc;
+
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+ return IQ_SEND_FAILED;
+ }
+
+ if (ndata->reqtype == REQTYPE_NORESP_NET)
+ ndata->reqtype = REQTYPE_RESP_NET;
+ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+ ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+ sc->callback = handle_timestamp;
+ sc->callback_arg = finfo->skb;
+ sc->iq_no = ndata->q_no;
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ ring_doorbell = !xmit_more;
+ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+ sc, ih->dlengsz, ndata->reqtype);
+
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ } else {
+ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+ }
+
+ return retval;
+}
+
+static inline int is_ipv4(struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->version == 4);
+}
+
+static inline int is_vlan(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_8021Q);
+}
+
+static inline int is_ip_fragmented(struct sk_buff *skb)
+{
+ /* The Don't-fragment and Reserved flag bits are ignored.
+ * The packet is an IP fragment if
+ * - the More-fragments bit is set (this is a fragment with more to
+ * follow; the fragment offset could still be 0), or
+ * - the fragment offset field is non-zero.
+ */
+ return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
+}
+
+static inline int is_ipv6(struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IPV6)) &&
+ (ipv6_hdr(skb)->version == 6);
+}
+
+static inline int is_with_extn_hdr(struct sk_buff *skb)
+{
+ return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
+ (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
+}
+
+static inline int is_tcpudp(struct sk_buff *skb)
+{
+ return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP);
+}
+
+static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
+{
+ u32 tag;
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ tag = crc32(0, &iphdr->protocol, 1);
+ tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
+ tag = crc32(tag, skb_transport_header(skb), 4);
+ return tag;
+}
+
+static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
+{
+ u32 tag;
+ struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+
+ tag = crc32(0, &ipv6hdr->nexthdr, 1);
+ tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
+ tag = crc32(tag, skb_transport_header(skb), 4);
+ return tag;
+}
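+
+/* Note on the byte spans above: for IPv4 the 8 bytes starting at
+ * &iphdr->saddr cover saddr and daddr (4 bytes each, adjacent in struct
+ * iphdr); for IPv6 the 32 bytes cover the two 16-byte addresses. The 4
+ * bytes at the transport header are the TCP/UDP source and destination
+ * ports, so each CRC hashes the classic 5-tuple.
+ */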
+
+/** \brief Transmit network packets to the Octeon interface
+ * @param skb skbuff struct to be transmitted
+ * @param netdev pointer to network device
+ * @returns NETDEV_TX_OK if the packet was queued to the device,
+ * NETDEV_TX_BUSY if it could not be sent now and should be retried
+ */
+static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct lio *lio;
+ struct octnet_buf_free_info *finfo;
+ union octnic_cmd_setup cmdsetup;
+ struct octnic_data_pkt ndata;
+ struct octeon_device *oct;
+ struct oct_iq_stats *stats;
+ int cpu = 0, status = 0;
+ int q_idx = 0, iq_no = 0;
+ int xmit_more;
+ u32 tag = 0;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ if (netif_is_multiqueue(netdev)) {
+ cpu = skb->queue_mapping;
+ q_idx = (cpu & (lio->linfo.num_txpciq - 1));
+ iq_no = lio->linfo.txpciq[q_idx];
+ } else {
+ iq_no = lio->txq;
+ }
+
+ stats = &oct->instr_queue[iq_no]->stats;
+
+ /* Check for all conditions in which the current packet cannot be
+ * transmitted.
+ */
+ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+ (!lio->linfo.link.s.status) ||
+ (skb->len <= 0)) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit failed link_status : %d\n",
+ lio->linfo.link.s.status);
+ goto lio_xmit_failed;
+ }
+
+ /* Use space in skb->cb to store info used to unmap and
+ * free the buffers.
+ */
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ finfo->lio = lio;
+ finfo->skb = skb;
+ finfo->sc = NULL;
+
+ /* Prepare the attributes for the data to be passed to OSI. */
+ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+ ndata.buf = (void *)finfo;
+
+ ndata.q_no = iq_no;
+
+ if (netif_is_multiqueue(netdev)) {
+ if (octnet_iq_is_full(oct, ndata.q_no)) {
+ /* defer sending if queue is full */
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ stats->tx_iq_busy++;
+ return NETDEV_TX_BUSY;
+ }
+ } else {
+ if (octnet_iq_is_full(oct, lio->txq)) {
+ /* defer sending if queue is full */
+ stats->tx_iq_busy++;
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ ndata.datasize = skb->len;
+
+ cmdsetup.u64 = 0;
+ cmdsetup.s.ifidx = lio->linfo.ifidx;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+ tag = get_ipv4_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+ if (ip_hdr(skb)->ihl > 5)
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV4OPTS;
+
+ } else if (is_ipv6(skb)) {
+ tag = get_ipv6_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+ if (is_with_extn_hdr(skb))
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV6EXTHDR;
+
+ } else if (is_vlan(skb)) {
+ if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+ == htons(ETH_P_IP) &&
+ !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+ tag = get_ipv4_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset =
+ sizeof(struct vlan_ethhdr) + 1;
+
+ if (ip_hdr(skb)->ihl > 5)
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV4OPTS;
+
+ } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+ == htons(ETH_P_IPV6)) {
+ tag = get_ipv6_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset =
+ sizeof(struct vlan_ethhdr) + 1;
+
+ if (is_with_extn_hdr(skb))
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV6EXTHDR;
+ }
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cmdsetup.s.timestamp = 1;
+ }
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ cmdsetup.s.u.datasize = skb->len;
+ octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ /* Map the linear skb data for transfer to the device */
+ ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ finfo->dptr = ndata.cmd.dptr;
+
+ ndata.reqtype = REQTYPE_NORESP_NET;
+
+ } else {
+ int i, frags;
+ struct skb_frag_struct *frag;
+ struct octnic_gather *g;
+
+ spin_lock(&lio->lock);
+ g = (struct octnic_gather *)list_delete_head(&lio->glist);
+ spin_unlock(&lio->lock);
+
+ if (!g) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit scatter gather: glist null!\n");
+ goto lio_xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+ octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+
+ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+ frags = skb_shinfo(skb)->nr_frags;
+ i = 1;
+ while (frags--) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ dma_map_page(&oct->pci_dev->dev,
+ frag->page.p,
+ frag->page_offset,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ i++;
+ }
+
+ ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ return NETDEV_TX_BUSY;
+ }
+
+ finfo->dptr = ndata.cmd.dptr;
+ finfo->g = g;
+
+ ndata.reqtype = REQTYPE_NORESP_NET_SG;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ struct octeon_instr_irh *irh =
+ (struct octeon_instr_irh *)&ndata.cmd.irh;
+ union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+
+ irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ }
+
+ xmit_more = skb->xmit_more;
+
+ if (unlikely(cmdsetup.s.timestamp))
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ else
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ if (status == IQ_SEND_FAILED)
+ goto lio_xmit_failed;
+
+ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+ if (status == IQ_SEND_STOP)
+ stop_q(lio->netdev, q_idx);
+
+ netdev->trans_start = jiffies;
+
+ stats->tx_done++;
+ stats->tx_tot_bytes += skb->len;
+
+ return NETDEV_TX_OK;
+
+lio_xmit_failed:
+ stats->tx_dropped++;
+ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+ iq_no, stats->tx_dropped);
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ recv_buffer_free(skb);
+ return NETDEV_TX_OK;
+}
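+
+/* Indexing note for the gather path above: each struct octeon_sg_entry
+ * holds four pointers, so buffer i lands in g->sg[i >> 2].ptr[i & 3].
+ * Slot 0 of entry 0 carries the linear skb data and slots 1..nr_frags
+ * carry the page fragments.
+ */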
+
+/** \brief Network device Tx timeout
+ * @param netdev pointer to network device
+ */
+static void liquidio_tx_timeout(struct net_device *netdev)
+{
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+ netdev->stats.tx_dropped);
+ netdev->trans_start = jiffies;
+ txqs_wake(netdev);
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** \brief Net device fix features
+ * @param netdev pointer to network device
+ * @param request features requested
+ * @returns updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+ netdev_features_t request)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((request & NETIF_F_RXCSUM) &&
+ !(lio->dev_capability & NETIF_F_RXCSUM))
+ request &= ~NETIF_F_RXCSUM;
+
+ if ((request & NETIF_F_HW_CSUM) &&
+ !(lio->dev_capability & NETIF_F_HW_CSUM))
+ request &= ~NETIF_F_HW_CSUM;
+
+ if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+ request &= ~NETIF_F_TSO;
+
+ if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+ request &= ~NETIF_F_TSO6;
+
+ if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ /* Disable LRO if RXCSUM is off */
+ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ return request;
+}
+
+/** \brief Net device set features
+ * @param netdev pointer to network device
+ * @param features features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if (!((netdev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ else if (!(features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+
+ return 0;
+}
+
+static struct net_device_ops lionetdevops = {
+ .ndo_open = liquidio_open,
+ .ndo_stop = liquidio_stop,
+ .ndo_start_xmit = liquidio_xmit,
+ .ndo_get_stats = liquidio_get_stats,
+ .ndo_set_mac_address = liquidio_set_mac,
+ .ndo_set_rx_mode = liquidio_set_mcast_list,
+ .ndo_tx_timeout = liquidio_tx_timeout,
+ .ndo_change_mtu = liquidio_change_mtu,
+ .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_fix_features = liquidio_fix_features,
+ .ndo_set_features = liquidio_set_features,
+};
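+
+/* Note: lionetdevops is deliberately not const; setup_nic_devices() below
+ * fills in .ndo_select_queue at probe time when an interface is created
+ * with more than one transmit queue.
+ */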
+
+/** \brief Entry point for the liquidio module
+ */
+static int __init liquidio_init(void)
+{
+ int i;
+ struct handshake *hs;
+
+ init_completion(&first_stage);
+
+ octeon_init_device_list(conf_type);
+
+ if (liquidio_init_pci())
+ return -EINVAL;
+
+ wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion(&hs->init);
+ if (!hs->init_ok) {
+ /* init handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Failed to init device\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion_timeout(&hs->started,
+ msecs_to_jiffies(30000));
+ if (!hs->started_ok) {
+ /* starter handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Firmware failed to start\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ int ifidx = 0;
+ union oct_link_status *ls;
+ int i;
+
+ if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
+ (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+ dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+ recv_pkt->buffer_size[0],
+ recv_pkt->rh.r_nic_info.ifidx);
+ goto nic_info_err;
+ }
+
+ ifidx = recv_pkt->rh.r_nic_info.ifidx;
+ ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+ octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+ update_link_status(oct->props[ifidx].netdev, ls);
+
+nic_info_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+/**
+ * \brief Setup network interfaces
+ * @param octeon_dev octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running. The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+ struct lio *lio = NULL;
+ struct net_device *netdev;
+ u8 mac[6], i, j;
+ struct octeon_soft_command *sc;
+ struct liquidio_if_cfg_context *ctx;
+ struct liquidio_if_cfg_resp *resp;
+ struct octdev_props *props;
+ int retval, num_iqueues, num_oqueues, q_no;
+ u64 q_mask;
+ int num_cpus = num_online_cpus();
+ union oct_nic_if_cfg if_cfg;
+ unsigned int base_queue;
+ unsigned int gmx_port_id;
+ u32 resp_size, ctx_size;
+
+ /* This is to handle link status changes */
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_INFO,
+ lio_nic_info, octeon_dev);
+
+ /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+ * They are handled directly.
+ */
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+ free_netbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+ free_netsgbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+ free_netsgbuf_with_resp);
+
+ for (i = 0; i < octeon_dev->ifcount; i++) {
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ ctx_size = sizeof(struct liquidio_if_cfg_context);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(octeon_dev, 0,
+ resp_size, ctx_size);
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ num_iqueues =
+ CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+ num_oqueues =
+ CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+ base_queue =
+ CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
+ gmx_port_id =
+ CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
+ if (num_iqueues > num_cpus)
+ num_iqueues = num_cpus;
+ if (num_oqueues > num_cpus)
+ num_oqueues = num_cpus;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "requesting config for interface %d, iqs %d, oqs %d\n",
+ i, num_iqueues, num_oqueues);
+ ACCESS_ONCE(ctx->cond) = 0;
+ ctx->octeon_id = lio_get_device_id(octeon_dev);
+ init_waitqueue_head(&ctx->wc);
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = num_iqueues;
+ if_cfg.s.num_oqueues = num_oqueues;
+ if_cfg.s.base_queue = base_queue;
+ if_cfg.s.gmx_port_id = gmx_port_id;
+ octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_IF_CFG, i,
+ if_cfg.u64, 0);
+
+ sc->callback = if_cfg_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(octeon_dev, sc);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto setup_nic_dev_fail;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ sleep_cond(&ctx->wc, &ctx->cond);
+ retval = resp->status;
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ num_iqueues = hweight64(resp->cfg_info.iqmask);
+ num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+ resp->cfg_info.iqmask,
+ resp->cfg_info.oqmask);
+ goto setup_nic_dev_fail;
+ }
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+ netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+
+ if (!netdev) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ props = &octeon_dev->props[i];
+ props->netdev = netdev;
+
+ if (num_iqueues > 1)
+ lionetdevops.ndo_select_queue = select_q;
+
+ /* Associate the routines that will handle different
+ * netdev tasks.
+ */
+ netdev->netdev_ops = &lionetdevops;
+
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->linfo.ifidx = resp->cfg_info.ifidx;
+ lio->ifidx = resp->cfg_info.ifidx;
+
+ lio->linfo.num_rxpciq = num_oqueues;
+ lio->linfo.num_txpciq = num_iqueues;
+ q_mask = resp->cfg_info.oqmask;
+ /* q_mask is 0-based and already verified mask is nonzero */
+ for (j = 0; j < num_oqueues; j++) {
+ q_no = __ffs64(q_mask);
+ q_mask &= ~(1ULL << q_no);
+ lio->linfo.rxpciq[j] = q_no;
+ }
+ q_mask = resp->cfg_info.iqmask;
+ for (j = 0; j < num_iqueues; j++) {
+ q_no = __ffs64(q_mask);
+ q_mask &= ~(1ULL << q_no);
+ lio->linfo.txpciq[j] = q_no;
+ }
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+
+ lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+ netdev->features = lio->dev_capability;
+ netdev->vlan_features = lio->dev_capability;
+
+ netdev->hw_features = lio->dev_capability;
+
+ /* Point to the properties for octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+ spin_lock_init(&lio->lock);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "if%d gmx: %d hw_addr: 0x%llx\n", i,
+ lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+ /* 64-bit swap required on LE machines */
+ octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+ for (j = 0; j < 6; j++)
+ mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+ /* Copy MAC Address to OS network device structure */
+
+ ether_addr_copy(netdev->dev_addr, mac);
+
+ if (setup_io_queues(octeon_dev, netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+ /* By default, each interface on an Octeon device uses its first
+ * tx and rx PCI queue
+ */
+ lio->txq = lio->linfo.txpciq[0];
+ lio->rxq = lio->linfo.rxpciq[0];
+
+ lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+ lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+ if (setup_glist(lio)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Gather list allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ /* Register ethtool support */
+ liquidio_set_ethtool_ops(netdev);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+
+ if ((debug != -1) && (debug & NETIF_MSG_HW))
+ liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+
+ /* Register the network device with the OS */
+ if (register_netdev(netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+ i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+
+ if (lio->linfo.link.s.status) {
+ netif_carrier_on(netdev);
+ start_txq(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup successful\n", i);
+
+ octeon_free_soft_command(octeon_dev, sc);
+ }
+
+ return 0;
+
+setup_nic_dev_fail:
+
+ octeon_free_soft_command(octeon_dev, sc);
+
+ while (i--) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup failed\n", i);
+ liquidio_destroy_nic_device(octeon_dev, i);
+ }
+ return -ENODEV;
+}
+
+/**
+ * \brief initialize the NIC
+ * @param oct octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+ struct oct_intrmod_cfg *intrmod_cfg;
+ int retval = 0;
+ int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
+
+ dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+ /* Only the default iq and oq were initialized earlier; initialize
+ * the rest as well. Run the port_config command for each port.
+ */
+ oct->ifcount = num_nic_ports;
+
+ memset(oct->props, 0,
+ sizeof(struct octdev_props) * num_nic_ports);
+
+ retval = setup_nic_devices(oct);
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+ goto octnet_init_failure;
+ }
+
+ liquidio_ptp_init(oct);
+
+ /* Initialize interrupt moderation params */
+ intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
+ intrmod_cfg->intrmod_enable = 1;
+ intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
+ intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
+ intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
+ intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+ return retval;
+
+octnet_init_failure:
+
+ oct->ifcount = 0;
+
+ return retval;
+}
+
+/**
+ * \brief starter callback that invokes the remaining initialization work after
+ * the NIC is up and running.
+ * @param work pointer to the work_struct embedded in struct cavium_wk
+ */
+static void nic_starter(struct work_struct *work)
+{
+ struct octeon_device *oct;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+
+ oct = (struct octeon_device *)wk->ctxptr;
+
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
+ return;
+
+ /* If the status of the device is CORE_OK, the core
+ * application has reported its application type. Call
+ * any registered handlers now and move to the RUNNING
+ * state.
+ */
+ if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
+ schedule_delayed_work(&oct->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+ return;
+ }
+
+ atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+ if (oct->app_mode == CVM_DRV_NIC_APP) {
+ dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
+
+ if (liquidio_init_nic_module(oct))
+ dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
+ else
+ handshake[oct->octeon_id].started_ok = 1;
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Unexpected application running on NIC (%d). Check firmware.\n",
+ oct->app_mode);
+ }
+
+ complete(&handshake[oct->octeon_id].started);
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param octeon_dev octeon device
+ */
+static int octeon_device_init(struct octeon_device *octeon_dev)
+{
+ int j, ret;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)octeon_dev->priv;
+ atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
+
+ /* Enable access to the octeon device and make its DMA capability
+ * known to the OS.
+ */
+ if (octeon_pci_os_setup(octeon_dev))
+ return 1;
+
+ /* Identify the Octeon type and map the BAR address space. */
+ if (octeon_chip_specific_setup(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+
+ octeon_dev->app_mode = CVM_DRV_INVALID_APP;
+
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+
+ /* Initialize the dispatch mechanism used to push packets arriving on
+ * Octeon Output queues.
+ */
+ if (octeon_init_dispatch_list(octeon_dev))
+ return 1;
+
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_CORE_DRV_ACTIVE,
+ octeon_core_drv_init,
+ octeon_dev);
+
+ INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
+ octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
+ schedule_delayed_work(&octeon_dev->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+ octeon_set_io_queues_off(octeon_dev);
+
+ /* Setup the data structures that manage this Octeon's Input queues. */
+ if (octeon_setup_instr_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "instruction queue initialization failed\n");
+ /* On error, release any previously allocated queues */
+ for (j = 0; j < octeon_dev->num_iqs; j++)
+ octeon_delete_instr_queue(octeon_dev, j);
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+ /* Initialize soft command buffer pool */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from user & kernel applications for this octeon device.
+ */
+ if (octeon_setup_response_list(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+ if (octeon_setup_output_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
+ /* Release any previously allocated queues */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ octeon_delete_droq(octeon_dev, j);
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
+
+ /* The input and output queue registers were setup earlier (the queues
+ * were not enabled). Any additional registers that need to be
+ * programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
+
+ /* Initialize the tasklet that handles output queue packet processing. */
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
+ tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
+ (unsigned long)octeon_dev);
+
+ /* Setup the interrupt handler and record the INT SUM register address
+ */
+ octeon_setup_interrupt(octeon_dev);
+
+ /* Enable Octeon device interrupts */
+ octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+
+ /* Enable the input and output queues for this Octeon device */
+ octeon_dev->fn_list.enable_io_queues(octeon_dev);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+
+ if (ddr_timeout == 0) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
+
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+
+ /* Wait for the octeon to initialize DDR after the soft-reset. */
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
+ return 1;
+ }
+
+ if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+ ret = octeon_add_console(octeon_dev, 0);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+
+ handshake[octeon_dev->octeon_id].init_ok = 1;
+ complete(&handshake[octeon_dev->octeon_id].init);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+
+ /* Send Credit for Octeon Output queues. Credits are always sent after
+ * the output queue is enabled.
+ */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ writel(octeon_dev->droq[j]->max_count,
+ octeon_dev->droq[j]->pkts_credit_reg);
+
+ /* Packets can start arriving on the output queues from this point. */
+
+ return 0;
+}
+
+/**
+ * \brief Exits the module
+ */
+static void __exit liquidio_exit(void)
+{
+ liquidio_deinit_pci();
+
+ pr_info("LiquidIO network module is now unloaded\n");
+}
+
+module_init(liquidio_init);
+module_exit(liquidio_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
new file mode 100644
index 000000000000..0ac347ccc8ba
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -0,0 +1,673 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file liquidio_common.h
+ * \brief Common: Structures and macros used in PCI-NIC package by core and
+ * host driver.
+ */
+
+#ifndef __LIQUIDIO_COMMON_H__
+#define __LIQUIDIO_COMMON_H__
+
+#include "octeon_config.h"
+
+#define LIQUIDIO_VERSION "1.1.9"
+#define LIQUIDIO_MAJOR_VERSION 1
+#define LIQUIDIO_MINOR_VERSION 1
+#define LIQUIDIO_MICRO_VERSION 9
+
+#define CONTROL_IQ 0
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ ORDERED_TAG = 0,
+ ATOMIC_TAG = 1,
+ NULL_TAG = 2,
+ NULL_NULL_TAG = 3
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* Opcodes used by host driver/apps to perform operations on the core.
+ * These are used to identify the major subsystem that the operation
+ * is for.
+ */
+#define OPCODE_CORE 0 /* used for generic core operations */
+#define OPCODE_NIC 1 /* used for NIC operations */
+#define OPCODE_LAST OPCODE_NIC
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique within a given subsystem.
+ */
+#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
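+
+/* Worked example (illustrative): OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_INFO)
+ * evaluates to ((1 & 0x0f) << 8) | (0x04 & 0x7f) = 0x104.
+ */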
+
+/** OPCODE_CORE subcodes. For future use. */
+
+/** OPCODE_NIC subcodes */
+
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define OPCODE_NIC_CORE_DRV_ACTIVE 0x01
+#define OPCODE_NIC_NW_DATA 0x02 /* network packet data */
+#define OPCODE_NIC_CMD 0x03
+#define OPCODE_NIC_INFO 0x04
+#define OPCODE_NIC_PORT_STATS 0x05
+#define OPCODE_NIC_MDIO45 0x06
+#define OPCODE_NIC_TIMESTAMP 0x07
+#define OPCODE_NIC_INTRMOD_CFG 0x08
+#define OPCODE_NIC_IF_CFG 0x09
+
+#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
+
+#define OPCODE_SLOW_PATH(rh) \
+ (OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \
+ OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA))
+
+/* Application codes advertised by the core driver initialization packet. */
+#define CVM_DRV_APP_START 0x0
+#define CVM_DRV_NO_APP 0
+#define CVM_DRV_APP_COUNT 0x2
+#define CVM_DRV_BASE_APP (CVM_DRV_APP_START + 0x0)
+#define CVM_DRV_NIC_APP (CVM_DRV_APP_START + 0x1)
+#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
+#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
+
+/* Macro to increment index.
+ * Index is incremented by count; if the sum reaches or exceeds
+ * max, the index wraps around to the start.
+ */
+#define INCR_INDEX(index, count, max) \
+do { \
+ if (((index) + (count)) >= (max)) \
+ index = ((index) + (count)) - (max); \
+ else \
+ index += (count); \
+} while (0)
+
+#define INCR_INDEX_BY1(index, max) \
+do { \
+ if ((++(index)) == (max)) \
+ index = 0; \
+} while (0)
+
+#define DECR_INDEX(index, count, max) \
+do { \
+ if ((count) > (index)) \
+ index = ((max) - ((count - index))); \
+ else \
+ index -= count; \
+} while (0)
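+
+/* Worked examples (illustrative): with max = 8, INCR_INDEX(index, 3, 8)
+ * takes an index of 6 to (6 + 3) - 8 = 1, while an index of 4 simply
+ * becomes 7; DECR_INDEX(index, 3, 8) takes an index of 1 back to
+ * 8 - (3 - 1) = 6.
+ */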
+
+#define OCT_BOARD_NAME 32
+#define OCT_SERIAL_LEN 64
+
+/* Structure used by core driver to send indication that the Octeon
+ * application is ready.
+ */
+struct octeon_core_setup {
+ u64 corefreq;
+
+ char boardname[OCT_BOARD_NAME];
+
+ char board_serial_number[OCT_SERIAL_LEN];
+
+ u64 board_rev_major;
+
+ u64 board_rev_minor;
+
+};
+
+/*--------------------------- SCATTER GATHER ENTRY -----------------------*/
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * an Octeon input instruction has this format.
+ */
+struct octeon_sg_entry {
+ /** The first 64 bits give the size of the data in each dptr. */
+ union {
+ u16 size[4];
+ u64 size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ u64 ptr[4];
+
+};
+
+#define OCT_SG_ENTRY_SIZE (sizeof(struct octeon_sg_entry))
+
+/** \brief Add size to gather list
+ * @param sg_entry scatter/gather entry
+ * @param size size to add
+ * @param pos position to add it.
+ */
+static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
+ u16 size,
+ u32 pos)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ sg_entry->u.size[pos] = size;
+#else
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
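+
+/* Worked example (illustrative): add_sg_size(&sg, 1024, 0) stores 1024 in
+ * u.size[0] on big-endian hosts and u.size[3] on little-endian hosts; in
+ * both cases entry 0 ends up in the most-significant 16 bits of u.size64
+ * read as a CPU integer (sg.u.size64 >> 48 == 1024).
+ */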
+
+/*------------------------- End Scatter/Gather ---------------------------*/
+
+#define OCTNET_FRM_PTP_HEADER_SIZE 8
+#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
+
+#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
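+
+/* Worked example (illustrative): with OCTNET_FRM_HEADER_SIZE = 30,
+ * liquidio_change_mtu() accepts new_mtu only if (new_mtu + 30) lies in
+ * [OCTNET_MIN_FRM_SIZE, OCTNET_MAX_FRM_SIZE] = [72, 16030], i.e. MTUs
+ * from 42 to 16000 bytes.
+ */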
+
+/** NIC Commands are sent using this Octeon Input Queue */
+#define OCTNET_CMD_Q 0
+
+/* NIC Command types */
+#define OCTNET_CMD_CHANGE_MTU 0x1
+#define OCTNET_CMD_CHANGE_MACADDR 0x2
+#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
+#define OCTNET_CMD_RX_CTL 0x4
+
+#define OCTNET_CMD_SET_MULTI_LIST 0x5
+#define OCTNET_CMD_CLEAR_STATS 0x6
+
+/* command for setting the speed, duplex & autoneg */
+#define OCTNET_CMD_SET_SETTINGS 0x7
+#define OCTNET_CMD_SET_FLOW_CTL 0x8
+
+#define OCTNET_CMD_MDIO_READ_WRITE 0x9
+#define OCTNET_CMD_GPIO_ACCESS 0xA
+#define OCTNET_CMD_LRO_ENABLE 0xB
+#define OCTNET_CMD_LRO_DISABLE 0xC
+#define OCTNET_CMD_SET_RSS 0xD
+#define OCTNET_CMD_WRITE_SA 0xE
+#define OCTNET_CMD_DELETE_SA 0xF
+#define OCTNET_CMD_UPDATE_SA 0x12
+
+#define OCTNET_CMD_TNL_RX_CSUM_CTL 0x10
+#define OCTNET_CMD_TNL_TX_CSUM_CTL 0x11
+#define OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13
+#define OCTNET_CMD_VERBOSE_ENABLE 0x14
+#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define CNNIC_L4SUM_VERIFIED 0x1
+#define CNNIC_IPSUM_VERIFIED 0x2
+#define CNNIC_TUN_CSUM_VERIFIED 0x4
+#define CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED)
+
+/*LROIPV4 and LROIPV6 Flags*/
+#define OCTNIC_LROIPV4 0x1
+#define OCTNIC_LROIPV6 0x2
+
+/* Interface flags communicated between host driver and core app. */
+enum octnet_ifflags {
+ OCTNET_IFFLAG_PROMISC = 0x01,
+ OCTNET_IFFLAG_ALLMULTI = 0x02,
+ OCTNET_IFFLAG_MULTICAST = 0x04,
+ OCTNET_IFFLAG_BROADCAST = 0x08,
+ OCTNET_IFFLAG_UNICAST = 0x10
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCT_NET_CMD |
+ * --------------- 64
+ * | Addtl 8B data |
+ * | |
+ * ---------------
+ */
+
+union octnet_cmd {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 cmd:5;
+
+ u64 more:6; /* How many udd words follow the command */
+
+ u64 param1:29;
+
+ u64 param2:16;
+
+ u64 param3:8;
+
+#else
+
+ u64 param3:8;
+
+ u64 param2:16;
+
+ u64 param1:29;
+
+ u64 more:6;
+
+ u64 cmd:5;
+
+#endif
+ } s;
+
+};
+
+#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+
+/** Instruction Header */
+struct octeon_instr_ih {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Gather indicator 1 = gather */
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Packet Order / Work Unit selection (1 of 8) */
+ u64 qos:3;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Tag Value */
+ u64 tag:32;
+#else
+ /** Tag Value */
+ u64 tag:32;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Packet Order / Work Unit selection (1 of 8) */
+ u64 qos:3;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1 = gather */
+ u64 gather:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 opcode:4;
+ u64 rflag:1;
+ u64 subcode:7;
+ u64 len:3;
+ u64 rid:13;
+ u64 reserved:4;
+ u64 ossp:32; /* opcode/subcode specific parameters */
+#else
+ u64 ossp:32; /* opcode/subcode specific parameters */
+ u64 reserved:4;
+ u64 rid:13;
+ u64 len:3;
+ u64 subcode:7;
+ u64 rflag:1;
+ u64 opcode:4;
+#endif
+};
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:49;
+ u64 pcie_port:3;
+ u64 rlen:12;
+#else
+ u64 rlen:12;
+ u64 pcie_port:3;
+ u64 reserved:49;
+#endif
+};
+
+/** Receive Header */
+union octeon_rh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 u64;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 reserved:4;
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 extra:24;
+ u64 link:8;
+ u64 csum_verified:3; /** checksum verified. */
+ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ } r_dh;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 num_gmx_ports:8;
+ u64 max_nic_ports:8;
+ u64 app_cap_flags:4;
+ u64 app_mode:16;
+ } r_core_drv_init;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13;
+ u64 reserved:4;
+ u64 extra:25;
+ u64 ifidx:7;
+ } r_nic_info;
+#else
+ u64 u64;
+ struct {
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 reserved:4;
+ u64 rid:13; /** req id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r;
+ struct {
+ u64 has_hwtstamp:1; /** 1 = has hwtstamp */
+ u64 csum_verified:3; /** checksum verified. */
+ u64 link:8;
+ u64 extra:24;
+ u64 rid:13; /** req id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_dh;
+ struct {
+ u64 app_mode:16;
+ u64 app_cap_flags:4;
+ u64 max_nic_ports:8;
+ u64 num_gmx_ports:8;
+ u64 rid:13;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_core_drv_init;
+ struct {
+ u64 ifidx:7;
+ u64 extra:25;
+ u64 reserved:4;
+ u64 rid:13;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_nic_info;
+#endif
+};
+
+#define OCT_RH_SIZE (sizeof(union octeon_rh))
+
+#define OCT_PKT_PARAM_IPV4OPTS 1
+#define OCT_PKT_PARAM_IPV6EXTHDR 2
+
+union octnic_packet_params {
+ u32 u32;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved:6;
+ u32 tnl_csum:1;
+ u32 ip_csum:1;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ipsec_ops:4;
+ u32 tsflag:1;
+ u32 csoffset:9;
+ u32 ifidx:8;
+#else
+ u32 ifidx:8;
+ u32 csoffset:9;
+ u32 tsflag:1;
+ u32 ipsec_ops:4;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ip_csum:1;
+ u32 tnl_csum:1;
+ u32 reserved:6;
+#endif
+ } s;
+};
+
+/** Status of an RGMII link on Octeon as seen by the core driver. */
+union oct_link_status {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 duplex:8;
+ u64 status:8;
+ u64 mtu:16;
+ u64 speed:16;
+ u64 autoneg:1;
+ u64 interface:4;
+ u64 pause:1;
+ u64 reserved:10;
+#else
+ u64 reserved:10;
+ u64 pause:1;
+ u64 interface:4;
+ u64 autoneg:1;
+ u64 speed:16;
+ u64 mtu:16;
+ u64 status:8;
+ u64 duplex:8;
+#endif
+ } s;
+};
+
+/** Information for an OCTEON ethernet interface shared between core & host. */
+struct oct_link_info {
+ union oct_link_status link;
+ u64 hw_addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gmxport;
+ u8 rsvd[3];
+ u8 num_txpciq;
+ u8 num_rxpciq;
+ u8 ifidx;
+#else
+ u8 ifidx;
+ u8 num_rxpciq;
+ u8 num_txpciq;
+ u8 rsvd[3];
+ u16 gmxport;
+#endif
+
+ u8 txpciq[MAX_IOQS_PER_NICIF];
+ u8 rxpciq[MAX_IOQS_PER_NICIF];
+};
+
+#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
+
+struct liquidio_if_cfg_info {
+ u64 ifidx;
+ u64 iqmask; /** mask for IQs enabled for the port */
+ u64 oqmask; /** mask for OQs enabled for the port */
+ struct oct_link_info linfo; /** initial link information */
+};
+
+/** Stats for each NIC port in RX direction. */
+struct nic_rx_stats {
+ /* link-level stats */
+ u64 total_rcvd;
+ u64 bytes_rcvd;
+ u64 total_bcst;
+ u64 total_mcst;
+ u64 runts;
+ u64 ctl_rcvd;
+ u64 fifo_err; /* Accounts for over/under-run of buffers */
+ u64 dmac_drop;
+ u64 fcs_err;
+ u64 jabber_err;
+ u64 l2_err;
+ u64 frame_err;
+
+ /* firmware stats */
+ u64 fw_total_rcvd;
+ u64 fw_total_fwd;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+ u64 fw_lro_pkts; /* Number of packets that are LROed */
+ u64 fw_lro_octs; /* Number of octets that are LROed */
+ u64 fw_total_lro; /* Number of LRO packets formed */
+ u64 fw_lro_aborts; /* Number of times LRO of a packet was aborted */
+ /* intrmod: packet forward rate */
+ u64 fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction. */
+struct nic_tx_stats {
+ /* link-level stats */
+ u64 total_pkts_sent;
+ u64 total_bytes_sent;
+ u64 mcast_pkts_sent;
+ u64 bcast_pkts_sent;
+ u64 ctl_sent;
+ u64 one_collision_sent; /* Packets sent after one collision*/
+ u64 multi_collision_sent; /* Packets sent after multiple collision*/
+ u64 max_collision_fail; /* Packets not sent due to max collisions */
+ u64 max_deferral_fail; /* Packets not sent due to max deferrals */
+ u64 fifo_err; /* Accounts for over/under-run of buffers */
+ u64 runts;
+ u64 total_collisions; /* Total number of collisions detected */
+
+ /* firmware stats */
+ u64 fw_total_sent;
+ u64 fw_total_fwd;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+};
+
+struct oct_link_stats {
+ struct nic_rx_stats fromwire;
+ struct nic_tx_stats fromhost;
+
+};
+
+#define LIO68XX_LED_CTRL_ADDR 0x3501
+#define LIO68XX_LED_CTRL_CFGON 0x1f
+#define LIO68XX_LED_CTRL_CFGOFF 0x100
+#define LIO68XX_LED_BEACON_ADDR 0x3508
+#define LIO68XX_LED_BEACON_CFGON 0x47fd
+#define LIO68XX_LED_BEACON_CFGOFF 0x11fc
+#define VITESSE_PHY_GPIO_DRIVEON 0x1
+#define VITESSE_PHY_GPIO_CFG 0x8
+#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
+#define VITESSE_PHY_GPIO_HIGH 0x2
+#define VITESSE_PHY_GPIO_LOW 0x3
+
+struct oct_mdio_cmd {
+ u64 op;
+ u64 mdio_addr;
+ u64 value1;
+ u64 value2;
+ u64 value3;
+};
+
+#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+
+#define LIO_INTRMOD_CHECK_INTERVAL 1
+#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
+#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
+#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
+#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
+#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
+
+struct oct_intrmod_cfg {
+ u64 intrmod_enable;
+ u64 intrmod_check_intrvl;
+ u64 intrmod_maxpkt_ratethr;
+ u64 intrmod_minpkt_ratethr;
+ u64 intrmod_maxcnt_trigger;
+ u64 intrmod_maxtmr_trigger;
+ u64 intrmod_mincnt_trigger;
+ u64 intrmod_mintmr_trigger;
+};
+
+#define BASE_QUEUE_NOT_REQUESTED 65535
+
+union oct_nic_if_cfg {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 base_queue:16;
+ u64 num_iqueues:16;
+ u64 num_oqueues:16;
+ u64 gmx_port_id:8;
+ u64 reserved:8;
+#else
+ u64 reserved:8;
+ u64 gmx_port_id:8;
+ u64 num_oqueues:16;
+ u64 num_iqueues:16;
+ u64 base_queue:16;
+#endif
+ } s;
+};
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
new file mode 100644
index 000000000000..93819bd8602b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -0,0 +1,57 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#ifndef _LIQUIDIO_IMAGE_H_
+#define _LIQUIDIO_IMAGE_H_
+
+#define LIO_MAX_FW_TYPE_LEN (8)
+#define LIO_MAX_FW_FILENAME_LEN (256)
+#define LIO_FW_DIR "liquidio/"
+#define LIO_FW_BASE_NAME "lio_"
+#define LIO_FW_NAME_SUFFIX ".bin"
+#define LIO_FW_NAME_TYPE_NIC "nic"
+#define LIO_FW_NAME_TYPE_NONE "none"
+#define LIO_MAX_FIRMWARE_VERSION_LEN 16
+
+#define LIO_MAX_BOOTCMD_LEN 1024
+#define LIO_MAX_IMAGES 16
+#define LIO_NIC_MAGIC 0x434E4943 /* "CNIC" */
+struct octeon_firmware_desc {
+ __be64 addr;
+ __be32 len;
+ __be32 crc32; /* crc32 of image */
+};
+
+/* Following the header is a list of 64-bit aligned binary images,
+ * as described by the desc field.
+ * Numeric fields are in network byte order.
+ */
+struct octeon_firmware_file_header {
+ __be32 magic;
+ char version[LIO_MAX_FIRMWARE_VERSION_LEN];
+ char bootcmd[LIO_MAX_BOOTCMD_LEN];
+ __be32 num_images;
+ struct octeon_firmware_desc desc[LIO_MAX_IMAGES];
+ __be32 pad;
+ __be32 crc32; /* header checksum */
+};
+
+#endif /* _LIQUIDIO_IMAGE_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
new file mode 100644
index 000000000000..62a8dd5cd3dc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -0,0 +1,424 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_config.h
+ * \brief Host Driver: Configuration data structures for the host driver.
+ */
+
+#ifndef __OCTEON_CONFIG_H__
+#define __OCTEON_CONFIG_H__
+
+/*--------------------------CONFIG VALUES------------------------*/
+
+/* The following macros affect the way the driver data structures
+ * are generated for Octeon devices.
+ * They can be modified.
+ */
+
+/* Maximum number of Octeon devices, defined as MAX_OCTEON_NICIF to
+ * support multiple (<= MAX_OCTEON_NICIF) NIC interfaces
+ */
+#define MAX_OCTEON_NICIF 32
+#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
+#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
+#define MAX_OCTEON_MULTICAST_ADDR 32
+
+/* CN6xxx IQ configuration macros */
+#define CN6XXX_MAX_INPUT_QUEUES 32
+#define CN6XXX_MAX_IQ_DESCRIPTORS 2048
+#define CN6XXX_DB_MIN 1
+#define CN6XXX_DB_MAX 8
+#define CN6XXX_DB_TIMEOUT 1
+
+/* CN6xxx OQ configuration macros */
+#define CN6XXX_MAX_OUTPUT_QUEUES 32
+#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
+#define CN6XXX_OQ_BUF_SIZE 1536
+#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+
+#define CN6XXX_OQ_INTR_PKT 64
+#define CN6XXX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_66XX 2
+#define DEFAULT_NUM_NIC_PORTS_68XX 4
+#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+
+/* common OCTEON configuration macros */
+#define CN6XXX_CFG_IO_QUEUES 32
+#define OCTEON_32BYTE_INSTR 32
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_MAX_BASE_IOQ 4
+#define OCTEON_OQ_BUFPTR_MODE 0
+#define OCTEON_OQ_INFOPTR_MODE 1
+
+#define OCTEON_DMA_INTR_PKT 64
+#define OCTEON_DMA_INTR_TIME 1000
+
+#define MAX_TXQS_PER_INTF 8
+#define MAX_RXQS_PER_INTF 8
+#define DEF_TXQS_PER_INTF 4
+#define DEF_RXQS_PER_INTF 4
+
+#define INVALID_IOQ_NO 0xff
+
+#define DEFAULT_POW_GRP 0
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
+#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+
+#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
+#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
+#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_SET_OQ_INTR_PKT(cfg, val) (cfg)->oq.oq_intr_pkt = val
+#define CFG_SET_OQ_INTR_TIME(cfg, val) (cfg)->oq.oq_intr_time = val
+
+#define CFG_GET_DMA_INTR_PKT(cfg) ((cfg)->dma.dma_intr_pkt)
+#define CFG_GET_DMA_INTR_TIME(cfg) ((cfg)->dma.dma_intr_time)
+#define CFG_GET_NUM_NIC_PORTS(cfg) ((cfg)->num_nic_ports)
+#define CFG_GET_NUM_DEF_TX_DESCS(cfg) ((cfg)->num_def_tx_descs)
+#define CFG_GET_NUM_DEF_RX_DESCS(cfg) ((cfg)->num_def_rx_descs)
+#define CFG_GET_DEF_RX_BUF_SIZE(cfg) ((cfg)->def_rx_buf_size)
+
+#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_txqs)
+#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_txqs)
+#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_rxqs)
+#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rxqs)
+#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rx_descs)
+#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_tx_descs)
+#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].rx_buf_size)
+#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].base_queue)
+#define CFG_GET_GMXID_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].gmx_port_id)
+
+#define CFG_GET_CTRL_Q_GRP(cfg) ((cfg)->misc.ctrlq_grp)
+#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.host_link_query_interval)
+#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.oct_link_query_interval)
+#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
+
+/* Max IOQs per OCTEON Link */
+#define MAX_IOQS_PER_NICIF 32
+
+enum lio_card_type {
+ LIO_210SV = 0, /* Two port, 66xx */
+ LIO_210NV, /* Two port, 68xx */
+ LIO_410NV /* Four port, 68xx */
+};
+
+#define LIO_210SV_NAME "210sv"
+#define LIO_210NV_NAME "210nv"
+#define LIO_410NV_NAME "410nv"
+
+/** Structure to define the configuration attributes for each Input queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_iq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:32;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+	/** Pending list size (usually set to the sum of the sizes of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+#else
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+
+	/** Pending list size (usually set to the sum of the sizes of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ u64 reserved:32;
+#endif
+};
+
+/** Structure to define the configuration attributes for each Output queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_oq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:16;
+
+ u64 pkts_per_intr:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+	 * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+	/** Interrupt Coalescing (Packet Count). Octeon will interrupt the
+	 * host only if it sent as many packets as specified by this field.
+	 * The driver usually does not use packet count interrupt coalescing.
+	 */
+ u64 oq_intr_pkt:16;
+
+	/** The number of buffers consumed during packet processing by the
+	 * driver on this Output queue before the driver attempts to
+	 * replenish the descriptor ring with new buffers.
+	 */
+ u64 refill_threshold:16;
+
+	/** If set, the Output queue uses info-pointer mode. (Default: 1) */
+ u64 info_ptr:32;
+
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+#else
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+	/** If set, the Output queue uses info-pointer mode. (Default: 1) */
+ u64 info_ptr:32;
+
+	/** The number of buffers consumed during packet processing by the
+	 * driver on this Output queue before the driver attempts to
+	 * replenish the descriptor ring with new buffers.
+	 */
+ u64 refill_threshold:16;
+
+	/** Interrupt Coalescing (Packet Count). Octeon will interrupt the
+	 * host only if it sent as many packets as specified by this field.
+	 * The driver usually does not use packet count interrupt coalescing.
+	 */
+ u64 oq_intr_pkt:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+	 * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ u64 pkts_per_intr:16;
+
+ u64 reserved:16;
+#endif
+
+};
+
+/** This structure contains the NIC link configuration attributes,
+ * common for all the OCTEON models.
+ */
+struct octeon_nic_if_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:56;
+
+ u64 base_queue:16;
+
+ u64 gmx_port_id:8;
+
+	/* SKB size; we need not change the buffer size even for jumbo
+	 * frames, since Octeon can send a jumbo frame in 4 consecutive
+	 * descriptors.
+	 */
+ u64 rx_buf_size:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+#else
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+	/* SKB size; we need not change the buffer size even for jumbo
+	 * frames, since Octeon can send a jumbo frame in 4 consecutive
+	 * descriptors.
+	 */
+ u64 rx_buf_size:16;
+
+ u64 gmx_port_id:8;
+
+ u64 base_queue:16;
+
+ u64 reserved:56;
+#endif
+
+};
+
+/** Structure to define the configuration attributes for metadata.
+ * Applicable to all Octeon processors.
+ */
+
+struct octeon_misc_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+
+	/** BP for SLI OQ */
+	u64 enable_sli_oq_bp:1;
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+#else
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+ /** BP for SLI OQ */
+ u64 enable_sli_oq_bp:1;
+	/** Oct link status polling period */
+	u64 oct_link_query_interval:32;
+	/** Host link status polling period */
+	u64 host_link_query_interval:32;
+#endif
+};
+
+/** Structure to define the configuration for all OCTEON processors. */
+struct octeon_config {
+ u16 card_type;
+ char *card_name;
+
+ /** Input Queue attributes. */
+ struct octeon_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct octeon_oq_config oq;
+
+ /** NIC Port Configuration */
+ struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF];
+
+ /** Miscellaneous attributes */
+ struct octeon_misc_config misc;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+
+};
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
+#define MAX_BAR1_MAP_INDEX 2
+#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
+
+/* BAR1 indices 0 to (MAX_BAR1_MAP_INDEX - 1) are for normal mapped memory
+ * access. The BAR1 register at MAX_BAR1_MAP_INDEX is used by the driver
+ * for dynamic access.
+ */
+#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \
+ OCTEON_BAR1_ENTRY_SIZE)
+
+/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
+ * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
+ */
+#define MAX_RESPONSE_LISTS 4
+
+/* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the
+ * dispatch table.
+ */
+#define OPCODE_MASK_BITS 6
+
+/* Mask for the 6-bit lookup hash */
+#define OCTEON_OPCODE_MASK 0x3f
+
+/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */
+#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
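
In other words, a packet's (opcode, subcode) pair is first folded into a
single 16-bit value and the low OPCODE_MASK_BITS of that value select the
dispatch bucket; colliding entries are chained on a per-bucket list. A
sketch of the index computation, using the OPCODE_SUBCODE() combining
macro that the dispatch code later in this patch relies on:

	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
	u32 idx = combined_opcode & OCTEON_OPCODE_MASK;

	/* idx is in 0 .. DISPATCH_LIST_SIZE - 1 */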
+
+/* Maximum number of Octeon Instruction (command) queues */
+#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+
+#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
new file mode 100644
index 000000000000..466147e409c9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -0,0 +1,723 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/**
+ * @file octeon_console.c
+ */
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void octeon_remote_lock(void);
+static void octeon_remote_unlock(void);
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags);
+
+#define MIN(a, b) min((a), (b))
+#define CAST_ULL(v) ((u64)(v))
+
+#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008
+#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004
+#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000
+#define BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100
+#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248
+
+#define OCTEON_PCI_IO_BUF_OWNER_OCTEON 0x00000001
+#define OCTEON_PCI_IO_BUF_OWNER_HOST 0x00000002
+
+/** Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/** minimum alignment of bootmem allocated blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/** CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/* Current versions */
+#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1
+#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0
+#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console"
+#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */
+
+/* The first three members of cvmx_bootmem_desc are left in their original
+ * positions for backwards compatibility.
+ * Assumes a big-endian target.
+ */
+struct cvmx_bootmem_desc {
+ /** spinlock to control access to list */
+ u32 lock;
+
+ /** flags for indicating various conditions */
+ u32 flags;
+
+ u64 head_addr;
+
+	/** incremented when incompatible changes are made */
+ u32 major_version;
+
+	/** incremented when compatible changes are made;
+	 * reset to zero when the major version is incremented
+	 */
+ u32 minor_version;
+
+ u64 app_data_addr;
+ u64 app_data_size;
+
+ /** number of elements in named blocks array */
+ u32 nb_num_blocks;
+
+ /** length of name array in bootmem blocks */
+ u32 named_block_name_len;
+
+ /** address of named memory block descriptors */
+ u64 named_block_array_addr;
+};
+
+/* Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console buffer is buf_size - 1.
+ */
+struct octeon_pci_console {
+ u64 input_base_addr;
+ u32 input_read_index;
+ u32 input_write_index;
+ u64 output_base_addr;
+ u32 output_read_index;
+ u32 output_write_index;
+ u32 lock;
+ u32 buf_size;
+};
+
+/* This is the main container structure that contains all the information
+ * about all PCI consoles. The address of this structure is passed to various
+ * routines that operate on PCI consoles.
+ */
+struct octeon_pci_console_desc {
+ u32 major_version;
+ u32 minor_version;
+ u32 lock;
+ u32 flags;
+ u32 num_consoles;
+ u32 pad;
+ /* must be 64 bit aligned here... */
+ /* Array of addresses of octeon_pci_console structures */
+ u64 console_addr_array[0];
+ /* Implicit storage for console_addr_array */
+};
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc to read.
+ * Regardless of the type of the field, the return type is always
+ * a u64.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field) \
+ __cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr, \
+ offsetof(struct cvmx_bootmem_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
+
+#define __cvmx_bootmem_lock(flags)
+#define __cvmx_bootmem_unlock(flags)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc to read. Regardless of the type
+ * of the field, the return type is always a u64. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field) \
+ __cvmx_bootmem_desc_get(oct, addr, \
+ offsetof(struct cvmx_bootmem_named_block_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The argument are generated
+ * by the macros inorder to read only the needed memory.
+ *
+ * @param oct Pointer to current octeon device
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
+ u64 base,
+ u32 offset,
+ u32 size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ return octeon_read_device_mem32(oct, base);
+ case 8:
+ return octeon_read_device_mem64(oct, base);
+ default:
+ return 0;
+ }
+}
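
A minimal usage sketch (the helper name is hypothetical): the
CVMX_BOOTMEM_DESC_GET_FIELD() macro above supplies the offset and width,
and this function turns them into one windowed 32- or 64-bit read of
remote Octeon memory:

	static u64 lio_named_block_array_addr(struct octeon_device *oct)
	{
		/* expands to __cvmx_bootmem_desc_get(oct,
		 * oct->bootmem_desc_addr,
		 * offsetof(struct cvmx_bootmem_desc, named_block_array_addr),
		 * sizeof(u64))
		 */
		return CVMX_BOOTMEM_DESC_GET_FIELD(oct,
						   named_block_array_addr);
	}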
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
+ u64 addr,
+ char *str,
+ u32 len)
+{
+ addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+ octeon_pci_read_core_mem(oct, addr, str, len);
+ str[len] = 0;
+}
+
+/* See header file for descriptions of functions */
+
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(struct octeon_device *oct,
+ u32 exact_match)
+{
+ u32 major_version;
+ u32 minor_version;
+
+ if (!oct->bootmem_desc_addr)
+ oct->bootmem_desc_addr =
+ octeon_read_device_mem64(oct,
+ BOOTLOADER_PCI_READ_DESC_ADDR);
+ major_version =
+ (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version);
+ minor_version =
+ (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version);
+ dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
+ major_version);
+	if ((major_version > CVMX_BOOTMEM_DESC_MAJ_VER) ||
+ (exact_match && major_version != exact_match)) {
+ dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n",
+ major_version, minor_version,
+ CAST_ULL(oct->bootmem_desc_addr));
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static const struct cvmx_bootmem_named_block_desc
+*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct,
+ const char *name, u32 flags)
+{
+ struct cvmx_bootmem_named_block_desc *desc =
+ &oct->bootmem_named_block_desc;
+ u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags);
+
+ if (named_addr) {
+ desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+ base_addr);
+ desc->size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size);
+ strncpy(desc->name, name, sizeof(desc->name));
+ desc->name[sizeof(desc->name) - 1] = 0;
+ return &oct->bootmem_named_block_desc;
+ } else {
+ return NULL;
+ }
+}
+
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags)
+{
+ u64 result = 0;
+
+ __cvmx_bootmem_lock(flags);
+	if (!__cvmx_bootmem_check_version(oct, CVMX_BOOTMEM_DESC_MAJ_VER)) {
+ u32 i;
+ u64 named_block_array_addr =
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct,
+ named_block_array_addr);
+ u32 num_blocks = (u32)
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks);
+ u32 name_length = (u32)
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct, named_block_name_len);
+ u64 named_addr = named_block_array_addr;
+
+ for (i = 0; i < num_blocks; i++) {
+ u64 named_size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+ size);
+ if (name && named_size) {
+				char *name_tmp =
+					kmalloc(name_length + 1, GFP_KERNEL);
+
+				if (!name_tmp)
+					break;
+				CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
+							    name_tmp,
+							    name_length);
+ if (!strncmp(name, name_tmp, name_length)) {
+ result = named_addr;
+ kfree(name_tmp);
+ break;
+ }
+ kfree(name_tmp);
+ } else if (!name && !named_size) {
+ result = named_addr;
+ break;
+ }
+
+ named_addr +=
+ sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+ }
+ __cvmx_bootmem_unlock(flags);
+ return result;
+}
+
+/**
+ * Find a named block on the remote Octeon
+ *
+ * @param name Name of block to find
+ * @param base_addr Address the block is at (OUTPUT)
+ * @param size The size of the block (OUTPUT)
+ *
+ * @return Zero on success, One on failure.
+ */
+static int octeon_named_block_find(struct octeon_device *oct, const char *name,
+ u64 *base_addr, u64 *size)
+{
+ const struct cvmx_bootmem_named_block_desc *named_block;
+
+ octeon_remote_lock();
+ named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0);
+ octeon_remote_unlock();
+ if (named_block) {
+ *base_addr = named_block->base_addr;
+ *size = named_block->size;
+ return 0;
+ }
+ return 1;
+}
+
+static void octeon_remote_lock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+static void octeon_remote_unlock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths)
+{
+ u32 len = strlen(cmd_str);
+
+ dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
+
+ if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) {
+ dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n",
+ BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1);
+ return -1;
+ }
+
+ if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) {
+ dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n");
+ return -1;
+ }
+
+ /* Write command to bootloader */
+ octeon_remote_lock();
+ octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR,
+ (u8 *)cmd_str, len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR,
+ len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR,
+ OCTEON_PCI_IO_BUF_OWNER_OCTEON);
+
+ /* Bootloader should accept command very quickly
+ * if it really was ready
+ */
+ if (octeon_wait_for_bootloader(oct, 200) != 0) {
+ octeon_remote_unlock();
+ dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n");
+ return -1;
+ }
+ octeon_remote_unlock();
+ return 0;
+}
+
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths)
+{
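+	/* wait_time_hundredths counts 1/100 s units; the "%d0" format below
+	 * appends a literal zero so the value prints as milliseconds
+	 */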
+ dev_dbg(&oct->pci_dev->dev, "waiting %d0 ms for bootloader\n",
+ wait_time_hundredths);
+
+ if (octeon_mem_access_ok(oct))
+ return -1;
+
+ while (wait_time_hundredths > 0 &&
+ octeon_read_device_mem32(oct,
+ BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR)
+ != OCTEON_PCI_IO_BUF_OWNER_HOST) {
+ if (--wait_time_hundredths <= 0)
+ return -1;
+ schedule_timeout_uninterruptible(HZ / 100);
+ }
+ return 0;
+}
+
+static void octeon_console_handle_result(struct octeon_device *oct,
+ size_t console_num,
+ char *buffer, s32 bytes_read)
+{
+ struct octeon_console *console;
+
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+}
+
+static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES];
+
+static void output_console_line(struct octeon_device *oct,
+ struct octeon_console *console,
+ size_t console_num,
+ char *console_buffer,
+ s32 bytes_read)
+{
+ char *line;
+ s32 i;
+
+ line = console_buffer;
+ for (i = 0; i < bytes_read; i++) {
+ /* Output a line at a time, prefixed */
+ if (console_buffer[i] == '\n') {
+ console_buffer[i] = '\0';
+ if (console->leftover[0]) {
+ dev_info(&oct->pci_dev->dev, "%lu: %s%s\n",
+ console_num, console->leftover,
+ line);
+ console->leftover[0] = '\0';
+ } else {
+ dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ console_num, line);
+ }
+ line = &console_buffer[i + 1];
+ }
+ }
+
+ /* Save off any leftovers */
+ if (line != &console_buffer[bytes_read]) {
+ console_buffer[bytes_read] = '\0';
+ strcpy(console->leftover, line);
+ }
+}
+
+static void check_console(struct work_struct *work)
+{
+ s32 bytes_read, tries, total_read;
+ struct octeon_console *console;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ size_t console_num = wk->ctxul;
+ u32 delay;
+
+ console = &oct->console[console_num];
+ tries = 0;
+ total_read = 0;
+
+ do {
+ /* Take console output regardless of whether it will
+ * be logged
+ */
+ bytes_read =
+ octeon_console_read(oct, console_num, console_buffer,
+ sizeof(console_buffer) - 1, 0);
+ if (bytes_read > 0) {
+ total_read += bytes_read;
+ if (console->waiting) {
+ octeon_console_handle_result(oct, console_num,
+ console_buffer,
+ bytes_read);
+ }
+ if (octeon_console_debug_enabled(console_num)) {
+ output_console_line(oct, console, console_num,
+ console_buffer, bytes_read);
+ }
+ } else if (bytes_read < 0) {
+ dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
+ console_num, bytes_read);
+ }
+
+ tries++;
+ } while ((bytes_read > 0) && (tries < 16));
+
+ /* If nothing is read after polling the console,
+ * output any leftovers if any
+ */
+ if (octeon_console_debug_enabled(console_num) &&
+ (total_read == 0) && (console->leftover[0])) {
+ dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ console_num, console->leftover);
+ console->leftover[0] = '\0';
+ }
+
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+
+ schedule_delayed_work(&wk->work, msecs_to_jiffies(delay));
+}
+
+int octeon_init_consoles(struct octeon_device *oct)
+{
+ int ret = 0;
+ u64 addr, size;
+
+ ret = octeon_mem_access_ok(oct);
+ if (ret) {
+		dev_err(&oct->pci_dev->dev, "Memory access not okay\n");
+ return ret;
+ }
+
+ ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr,
+ &size);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n",
+ OCTEON_PCI_CONSOLE_BLOCK_NAME);
+ return ret;
+ }
+
+	/* A non-zero num_consoles value indicates that the consoles
+	 * are accessible
+	 */
+ oct->num_consoles = octeon_read_device_mem32(oct,
+ addr + offsetof(struct octeon_pci_console_desc,
+ num_consoles));
+ oct->console_desc_addr = addr;
+
+ dev_dbg(&oct->pci_dev->dev, "Initialized consoles. %d available\n",
+ oct->num_consoles);
+
+ return ret;
+}
+
+int octeon_add_console(struct octeon_device *oct, u32 console_num)
+{
+ int ret = 0;
+ u32 delay;
+ u64 coreaddr;
+ struct delayed_work *work;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev,
+ "trying to read from console number %d when only 0 to %d exist\n",
+			console_num, oct->num_consoles - 1);
+ } else {
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+
+ coreaddr = oct->console_desc_addr + console_num * 8 +
+ offsetof(struct octeon_pci_console_desc,
+ console_addr_array);
+ console->addr = octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ buf_size);
+ console->buffer_size = octeon_read_device_mem32(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ input_base_addr);
+ console->input_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ output_base_addr);
+ console->output_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ console->leftover[0] = '\0';
+
+ work = &oct->console_poll_work[console_num].work;
+
+ INIT_DELAYED_WORK(work, check_console);
+ oct->console_poll_work[console_num].ctxptr = (void *)oct;
+ oct->console_poll_work[console_num].ctxul = console_num;
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+ schedule_delayed_work(work, msecs_to_jiffies(delay));
+
+ if (octeon_console_debug_enabled(console_num)) {
+ ret = octeon_console_send_cmd(oct,
+ "setenv pci_console_active 1",
+ 2000);
+ }
+
+ console->active = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Removes all consoles
+ *
+ * @param oct octeon device
+ */
+void octeon_remove_consoles(struct octeon_device *oct)
+{
+ u32 i;
+ struct octeon_console *console;
+
+ for (i = 0; i < oct->num_consoles; i++) {
+ console = &oct->console[i];
+
+ if (!console->active)
+ continue;
+
+		cancel_delayed_work_sync(&oct->console_poll_work[i].work);
+ console->addr = 0;
+ console->buffer_size = 0;
+ console->input_base_addr = 0;
+ console->output_base_addr = 0;
+ }
+
+ oct->num_consoles = 0;
+}
+
+static inline int octeon_console_free_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
+}
+
+static inline int octeon_console_avail_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return buffer_size - 1 -
+ octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
+}
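
A worked example of the ring arithmetic above, assuming a 1024-byte console
ring (usable capacity 1023) with wr_idx = 10 and rd_idx = 1000:

	free  = ((1024 - 1) - (10 - 1000)) % 1024	/* u32 wraparound */
	      = (1023 + 990) % 1024 = 989
	avail = 1024 - 1 - 989 = 34	/* 24 bytes at the tail of the ring
					 * plus 10 at the head */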
+
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size, u32 flags)
+{
+ int bytes_to_read;
+ u32 rd_idx, wr_idx;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n",
+ console_num);
+ return 0;
+ }
+
+ console = &oct->console[console_num];
+
+ /* Check to see if any data is available.
+ * Maybe optimize this with 64-bit read.
+ */
+ rd_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_read_index));
+ wr_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_write_index));
+
+ bytes_to_read = octeon_console_avail_bytes(console->buffer_size,
+ wr_idx, rd_idx);
+ if (bytes_to_read <= 0)
+ return bytes_to_read;
+
+ bytes_to_read = MIN(bytes_to_read, (s32)buf_size);
+
+ /* Check to see if what we want to read is not contiguous, and limit
+ * ourselves to the contiguous block
+ */
+ if (rd_idx + bytes_to_read >= console->buffer_size)
+ bytes_to_read = console->buffer_size - rd_idx;
+
+ octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
+ buffer, bytes_to_read);
+ octeon_write_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console,
+ output_read_index),
+ (rd_idx + bytes_to_read) %
+ console->buffer_size);
+
+ return bytes_to_read;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
new file mode 100644
index 000000000000..0d3106b464b2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -0,0 +1,1309 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/** Default configuration
+ * for CN66XX OCTEON Models.
+ */
+static struct octeon_config default_cn66xx_conf = {
+ .card_type = LIO_210SV,
+ .card_name = LIO_210SV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+	},
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+	},
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+		/* Octeon link status polling interval */
+		.oct_link_query_interval = 100,
+
+		/* Host link status polling interval */
+		.host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+	},
+};
+
+/** Default configuration
+ * for the CN68XX OCTEON Model (four-port 410NV).
+ */
+
+static struct octeon_config default_cn68xx_conf = {
+ .card_type = LIO_410NV,
+ .card_name = LIO_410NV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+	},
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+	},
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .nic_if_cfg[2] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 2,
+ },
+
+ .nic_if_cfg[3] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 3,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+		/* Octeon link status polling interval */
+		.oct_link_query_interval = 100,
+
+		/* Host link status polling interval */
+		.host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+	},
+};
+
+/** Default configuration
+ * for the CN68XX OCTEON Model (two-port 210NV).
+ */
+static struct octeon_config default_cn68xx_210nv_conf = {
+ .card_type = LIO_210NV,
+ .card_name = LIO_210NV_NAME,
+
+ /** IQ attributes */
+
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+	},
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+	},
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+		/* SKB size; we need not change the buffer size even for
+		 * jumbo frames, since Octeon can send a jumbo frame in 4
+		 * consecutive descriptors.
+		 */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+		/* Octeon link status polling interval */
+		.oct_link_query_interval = 100,
+
+		/* Host link status polling interval */
+		.host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+	},
+};
+
+enum {
+ OCTEON_CONFIG_TYPE_DEFAULT = 0,
+ NUM_OCTEON_CONFS,
+};
+
+static struct octeon_config_ptr {
+ u32 conf_type;
+} oct_conf_info[MAX_OCTEON_DEVICES] = {
+ {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ },
+};
+
+static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
+ "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
+ "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "INVALID"
+};
+
+static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
+ "BASE", "NIC", "UNKNOWN"};
+
+static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static u32 octeon_device_count;
+
+static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
+
+static void oct_set_config_info(int oct_id, int conf_type)
+{
+ if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
+ conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
+ oct_conf_info[oct_id].conf_type = conf_type;
+}
+
+void octeon_init_device_list(int conf_type)
+{
+ int i;
+
+ memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ oct_set_config_info(i, conf_type);
+}
+
+static void *__retrieve_octeon_config_info(struct octeon_device *oct,
+ u16 card_type)
+{
+ u32 oct_id = oct->octeon_id;
+ void *ret = NULL;
+
+ switch (oct_conf_info[oct_id].conf_type) {
+ case OCTEON_CONFIG_TYPE_DEFAULT:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ ret = (void *)&default_cn66xx_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_210NV)) {
+ ret = (void *)&default_cn68xx_210nv_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_410NV)) {
+ ret = (void *)&default_cn68xx_conf;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
+{
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX:
+ return lio_validate_cn6xxx_config_info(oct, conf);
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
+{
+ void *conf = NULL;
+
+ conf = __retrieve_octeon_config_info(oct, card_type);
+ if (!conf)
+ return NULL;
+
+ if (__verify_octeon_config_info(oct, conf)) {
+ dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return conf;
+}
+
+char *lio_get_state_string(atomic_t *state_ptr)
+{
+ s32 istate = (s32)atomic_read(state_ptr);
+
+ if (istate > OCT_DEV_STATES || istate < 0)
+ return oct_dev_state_str[OCT_DEV_STATE_INVALID];
+ return oct_dev_state_str[istate];
+}
+
+static char *get_oct_app_string(u32 app_mode)
+{
+ if (app_mode <= CVM_DRV_APP_END)
+ return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
+ return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
+}
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ int ret = 0;
+ u8 *p;
+ u8 *buffer;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ struct octeon_firmware_file_header *h;
+ u32 i;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
+ crc32_result =
+ crc32(~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
+ LIQUIDIO_VERSION, h->version);
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ memcpy(buffer, data, size);
+
+ p = buffer + sizeof(struct octeon_firmware_file_header);
+
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ /* validate the image */
+ crc32_result = crc32(~0, p, image_len) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
+ dev_err(&oct->pci_dev->dev,
+ "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
+ i, crc32_result,
+ be32_to_cpu(h->desc[i].crc32));
+ ret = -EINVAL;
+ goto done_downloading;
+ }
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+
+ p += image_len;
+ dev_dbg(&oct->pci_dev->dev,
+ "Downloaded image %d (%d bytes) to address 0x%016llx\n",
+ i, image_len, load_addr);
+ }
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+done_downloading:
+ kfree(buffer);
+
+ return ret;
+}
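
The caller is expected to fetch the blob first, e.g. with the kernel's
firmware loader; a hedged sketch of the call site (the file name follows
the LIO_FW_DIR/LIO_FW_BASE_NAME pattern above but is illustrative):

	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "liquidio/lio_210sv_nic.bin",
			       &oct->pci_dev->dev);
	if (ret == 0) {
		ret = octeon_download_firmware(oct, fw->data, fw->size);
		release_firmware(fw);
	}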
+
+void octeon_free_device_mem(struct octeon_device *oct)
+{
+ u32 i;
+
+	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+		/* could check mask as well; vfree(NULL) is a no-op */
+		vfree(oct->droq[i]);
+	}
+
+	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+		/* could check mask as well; vfree(NULL) is a no-op */
+		vfree(oct->instr_queue[i]);
+	}
+
+ i = oct->octeon_id;
+ vfree(oct);
+
+ octeon_device[i] = NULL;
+ octeon_device_count--;
+}
+
+static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
+ u32 priv_size)
+{
+ struct octeon_device *oct;
+ u8 *buf = NULL;
+ u32 octdevsize = 0, configsize = 0, size;
+
+ switch (pci_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ configsize = sizeof(struct octeon_cn6xxx);
+ break;
+
+ default:
+ pr_err("%s: Unknown PCI Device: 0x%x\n",
+ __func__,
+ pci_id);
+ return NULL;
+ }
+
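+	/* round each region up to the next 8-byte boundary so the private,
+	 * chip and dispatch areas that follow stay naturally aligned
+	 */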
+ if (configsize & 0x7)
+ configsize += (8 - (configsize & 0x7));
+
+ octdevsize = sizeof(struct octeon_device);
+ if (octdevsize & 0x7)
+ octdevsize += (8 - (octdevsize & 0x7));
+
+ if (priv_size & 0x7)
+ priv_size += (8 - (priv_size & 0x7));
+
+ size = octdevsize + priv_size + configsize +
+ (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
+
+	buf = vzalloc(size);
+	if (!buf)
+		return NULL;
+
+ oct = (struct octeon_device *)buf;
+ oct->priv = (void *)(buf + octdevsize);
+ oct->chip = (void *)(buf + octdevsize + priv_size);
+ oct->dispatch.dlist = (struct octeon_dispatch *)
+ (buf + octdevsize + priv_size + configsize);
+
+ return oct;
+}
+
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size)
+{
+ u32 oct_idx = 0;
+ struct octeon_device *oct = NULL;
+
+ for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
+ if (!octeon_device[oct_idx])
+ break;
+
+ if (oct_idx == MAX_OCTEON_DEVICES)
+ return NULL;
+
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (!oct)
+ return NULL;
+
+ spin_lock_init(&oct->pci_win_lock);
+ spin_lock_init(&oct->mem_access_lock);
+
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+
+ oct->octeon_id = oct_idx;
+	snprintf(oct->device_name, sizeof(oct->device_name),
+		 "LiquidIO%d", oct->octeon_id);
+
+ return oct;
+}
+
+int octeon_setup_instr_queues(struct octeon_device *oct)
+{
+ u32 i, num_iqs = 0;
+ u32 num_descs = 0;
+
+	/* this causes queue 0 to be the default queue */
+ if (OCTEON_CN6XXX(oct)) {
+ num_iqs = 1;
+ num_descs =
+ CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ oct->num_iqs = 0;
+
+ for (i = 0; i < num_iqs; i++) {
+		oct->instr_queue[i] =
+			vzalloc(sizeof(struct octeon_instr_queue));
+		if (!oct->instr_queue[i])
+			return 1;
+
+ oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
+ if (octeon_init_instr_queue(oct, i, num_descs))
+ return 1;
+
+ oct->num_iqs++;
+ }
+
+ return 0;
+}
+
+int octeon_setup_output_queues(struct octeon_device *oct)
+{
+ u32 i, num_oqs = 0;
+ u32 num_descs = 0;
+ u32 desc_size = 0;
+
+	/* this causes queue 0 to be the default queue */
+ if (OCTEON_CN6XXX(oct)) {
+ /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
+ num_oqs = 1;
+ num_descs =
+ CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ desc_size =
+ CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ oct->num_oqs = 0;
+
+ for (i = 0; i < num_oqs; i++) {
+		oct->droq[i] = vzalloc(sizeof(*oct->droq[i]));
+		if (!oct->droq[i])
+			return 1;
+
+ if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
+ return 1;
+
+ oct->num_oqs++;
+ }
+
+ return 0;
+}
+
+void octeon_set_io_queues_off(struct octeon_device *oct)
+{
+ /* Disable the i/p and o/p queues for this Octeon. */
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+}
+
+void octeon_set_droq_pkt_op(struct octeon_device *oct,
+ u32 q_no,
+ u32 enable)
+{
+ u32 reg_val = 0;
+
+ /* Disable the i/p and o/p queues for this Octeon. */
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+}
+
+int octeon_init_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+
+ oct->dispatch.count = 0;
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ oct->dispatch.dlist[i].opcode = 0;
+ INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
+ }
+
+ for (i = 0; i <= REQTYPE_LAST; i++)
+ octeon_register_reqtype_free_fn(oct, i, NULL);
+
+ spin_lock_init(&oct->dispatch.lock);
+
+ return 0;
+}
+
+void octeon_delete_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+ struct list_head freelist, *temp, *tmp2;
+
+ INIT_LIST_HEAD(&freelist);
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ struct list_head *dispatch;
+
+ dispatch = &oct->dispatch.dlist[i].list;
+ while (dispatch->next != dispatch) {
+ temp = dispatch->next;
+ list_del(temp);
+ list_add_tail(temp, &freelist);
+ }
+
+ oct->dispatch.dlist[i].opcode = 0;
+ }
+
+ oct->dispatch.count = 0;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ list_for_each_safe(temp, tmp2, &freelist) {
+ list_del(temp);
+ vfree(temp);
+ }
+}
+
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode)
+{
+ u32 idx;
+ struct list_head *dispatch;
+ octeon_dispatch_fn_t fn = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn = ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn;
+}
+
+/* octeon_register_dispatch_fn
+ * Parameters:
+ * octeon_id - id of the octeon device.
+ * opcode - opcode for which driver should call the registered function
+ * subcode - subcode for which driver should call the registered function
+ * fn - The function to call when a packet with "opcode" arrives in
+ * octeon output queues.
+ * fn_arg - The argument to be passed when calling function "fn".
+ * Description:
+ * Registers a function and its argument to be called when a packet
+ * arrives in Octeon output queues with "opcode".
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg)
+{
+ u32 idx;
+ octeon_dispatch_fn_t pfn;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+ /* Add dispatch function to first level of lookup table */
+ if (oct->dispatch.dlist[idx].opcode == 0) {
+ oct->dispatch.dlist[idx].opcode = combined_opcode;
+ oct->dispatch.dlist[idx].dispatch_fn = fn;
+ oct->dispatch.dlist[idx].arg = fn_arg;
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+ return 0;
+ }
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ /* Check if there was a function already registered for this
+ * opcode/subcode.
+ */
+ pfn = octeon_get_dispatch(oct, opcode, subcode);
+ if (!pfn) {
+ struct octeon_dispatch *dispatch;
+
+ dev_dbg(&oct->pci_dev->dev,
+ "Adding opcode to dispatch list linked list\n");
+ dispatch = vmalloc(sizeof(*dispatch));
+ if (!dispatch) {
+ dev_err(&oct->pci_dev->dev,
+ "No memory to add dispatch function\n");
+ return 1;
+ }
+ dispatch->opcode = combined_opcode;
+ dispatch->dispatch_fn = fn;
+ dispatch->arg = fn_arg;
+
+ /* Add dispatch function to linked list of fn ptrs
+ * at the hashed index.
+ */
+ spin_lock_bh(&oct->dispatch.lock);
+ list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
+ opcode, subcode);
+ return 1;
+ }
+
+ return 0;
+}
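+
+/* Minimal usage sketch (illustration only; OPCODE_SAMPLE and
+ * sample_dispatch() are hypothetical names, not part of this driver):
+ *
+ *	static int sample_dispatch(struct octeon_recv_info *rinfo, void *arg)
+ *	{
+ *		// ... process rinfo->recv_pkt ...
+ *		octeon_free_recv_info(rinfo);
+ *		return 0;
+ *	}
+ *
+ *	octeon_register_dispatch_fn(oct, OPCODE_SAMPLE, 0, sample_dispatch, oct);
+ *
+ * Packets whose receive header carries that opcode/subcode are then handed
+ * to sample_dispatch() during DROQ processing, and the handler is
+ * responsible for freeing the recv_info.
+ */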
+
+/* octeon_unregister_dispatch_fn
+ * Parameters:
+ * oct - octeon device
+ * opcode - driver should unregister the function for this opcode
+ * subcode - driver should unregister the function for this subcode
+ * Description:
+ * Unregister the function set for this opcode+subcode.
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
+ u16 subcode)
+{
+ int retval = 0;
+ u32 idx;
+ struct list_head *dispatch, *dfree = NULL, *tmp2;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ if (oct->dispatch.count == 0) {
+ spin_unlock_bh(&oct->dispatch.lock);
+ dev_err(&oct->pci_dev->dev,
+ "No dispatch functions registered for this device\n");
+ return 1;
+ }
+
+ if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
+ dispatch = &oct->dispatch.dlist[idx].list;
+ if (dispatch->next != dispatch) {
+ dispatch = dispatch->next;
+ oct->dispatch.dlist[idx].opcode =
+ ((struct octeon_dispatch *)dispatch)->opcode;
+ oct->dispatch.dlist[idx].dispatch_fn =
+ ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ oct->dispatch.dlist[idx].arg =
+ ((struct octeon_dispatch *)dispatch)->arg;
+ list_del(dispatch);
+ dfree = dispatch;
+ } else {
+ oct->dispatch.dlist[idx].opcode = 0;
+ oct->dispatch.dlist[idx].dispatch_fn = NULL;
+ oct->dispatch.dlist[idx].arg = NULL;
+ }
+ } else {
+ retval = 1;
+ list_for_each_safe(dispatch, tmp2,
+ &(oct->dispatch.dlist[idx].
+ list)) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ list_del(dispatch);
+ dfree = dispatch;
+ retval = 0;
+ }
+ }
+ }
+
+ if (!retval)
+ oct->dispatch.count--;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ vfree(dfree);
+
+ return retval;
+}
+
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
+{
+ u32 i;
+ char app_name[16] = ""; /* keep a terminating NUL after strncpy() */
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ struct octeon_core_setup *cs = NULL;
+ u32 num_nic_ports = 0;
+
+ if (OCTEON_CN6XXX(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+
+ if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
+ dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
+ atomic_read(&oct->status));
+ goto core_drv_init_err;
+ }
+
+ strncpy(app_name,
+ get_oct_app_string(
+ (u32)recv_pkt->rh.r_core_drv_init.app_mode),
+ sizeof(app_name) - 1);
+ oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
+ oct->fw_info.max_nic_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
+ oct->fw_info.num_gmx_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
+ }
+
+ if (oct->fw_info.max_nic_ports < num_nic_ports) {
+ dev_err(&oct->pci_dev->dev,
+ "Config has more ports than firmware allows (%d > %d).\n",
+ num_nic_ports, oct->fw_info.max_nic_ports);
+ goto core_drv_init_err;
+ }
+ oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
+ oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+ cs = &core_setup[oct->octeon_id];
+
+ if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+ dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
+ (u32)sizeof(*cs),
+ recv_pkt->buffer_size[0]);
+ }
+
+ memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+ strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
+ strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ OCT_SERIAL_LEN);
+
+ octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
+
+ oct->boardinfo.major = cs->board_rev_major;
+ oct->boardinfo.minor = cs->board_rev_minor;
+
+ dev_info(&oct->pci_dev->dev,
+ "Running %s (%llu Hz)\n",
+ app_name, CVM_CAST64(cs->corefreq));
+
+core_drv_init_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
+ (oct->io_qmask.iq & (1UL << q_no)))
+ return oct->instr_queue[q_no]->max_count;
+
+ return -1;
+}
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
+ (oct->io_qmask.oq & (1UL << q_no)))
+ return oct->droq[q_no]->max_count;
+ return -1;
+}
+
+/* Returns the OCTEON-specific configuration established by the host/firmware handshake */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct)
+{
+ struct octeon_config *default_oct_conf = NULL;
+
+ /* check the OCTEON Device model & return the corresponding octeon
+ * configuration
+ */
+
+ if (OCTEON_CN6XXX(oct)) {
+ default_oct_conf =
+ (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ return default_oct_conf;
+}
+
+/* The scratch register address is the same in all OCT-II and CN70XX models */
+#define CNXX_SLI_SCRATCH1 0x3C0
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id)
+{
+ if (octeon_id >= MAX_OCTEON_DEVICES)
+ return NULL;
+ else
+ return octeon_device[octeon_id];
+}
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
+{
+ u64 val64;
+ unsigned long flags;
+ u32 val32, addrhi;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ /* The windowed read happens when the LSB of the addr is written.
+ * So write MSB first
+ */
+ addrhi = (addr >> 32);
+ if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+ addrhi |= 0x00060000;
+ writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
+
+ /* Read back to preserve ordering of writes */
+ val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+
+ writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
+ val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+
+ val64 = readq(oct->reg_list.pci_win_rd_data);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+
+ return val64;
+}
+
+void lio_pci_writeq(struct octeon_device *oct,
+ u64 val,
+ u64 addr)
+{
+ u32 val32;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ writeq(addr, oct->reg_list.pci_win_wr_addr);
+
+ /* The write happens when the LSB is written. So write MSB first. */
+ writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
+ /* Read the MSB to ensure ordering of writes. */
+ val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+
+ writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+}
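+
+/* Usage sketch for the windowed accessors: a read-modify-write of an
+ * indirectly accessed register through the BAR0 window. The address used
+ * here (CN6XXX_LMC0_RESET_CTL) is just the one octeon_mem_access_ok()
+ * below already reads; any windowed address works the same way.
+ *
+ *	u64 v = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ *	lio_pci_writeq(oct, v, CN6XXX_LMC0_RESET_CTL);
+ */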
+
+int octeon_mem_access_ok(struct octeon_device *oct)
+{
+ u64 access_okay = 0;
+
+ /* Check to make sure a DDR interface is enabled */
+ u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+
+ access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+
+ return access_okay ? 0 : 1;
+}
+
+int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
+{
+ int ret = 1;
+ u32 ms;
+
+ if (!timeout)
+ return ret;
+
+ while (*timeout == 0)
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
+ ms += HZ / 10) {
+ ret = octeon_mem_access_ok(oct);
+
+ /* wait 100 ms */
+ if (ret)
+ schedule_timeout_uninterruptible(HZ / 10);
+ }
+
+ return ret;
+}
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev)
+{
+ struct octeon_device *octeon_dev = (struct octeon_device *)dev;
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ if (octeon_device[i] == octeon_dev)
+ return octeon_dev->octeon_id;
+ return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
new file mode 100644
index 000000000000..36e1f85df8c4
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -0,0 +1,649 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_device.h
+ * \brief Host Driver: This file defines the octeon device structure.
+ */
+
+#ifndef _OCTEON_DEVICE_H_
+#define _OCTEON_DEVICE_H_
+
+/** PCI VendorId Device Id */
+#define OCTEON_CN68XX_PCIID 0x91177d
+#define OCTEON_CN66XX_PCIID 0x92177d
+
+/** Driver identifies chips by these Ids, created by clubbing together
+ * DeviceId+RevisionId; Where Revision Id is not used to distinguish
+ * between chips, a value of 0 is used for revision id.
+ */
+#define OCTEON_CN68XX 0x0091
+#define OCTEON_CN66XX 0x0092
+
+/** Endian-swap modes supported by Octeon. */
+enum octeon_pci_swap_mode {
+ OCTEON_PCI_PASSTHROUGH = 0,
+ OCTEON_PCI_64BIT_SWAP = 1,
+ OCTEON_PCI_32BIT_BYTE_SWAP = 2,
+ OCTEON_PCI_32BIT_LW_SWAP = 3
+};
+
+/*--------------- PCI BAR1 index registers -------------*/
+
+/* BAR1 Mask */
+#define PCI_BAR1_ENABLE_CA 1
+#define PCI_BAR1_ENDIAN_MODE OCTEON_PCI_64BIT_SWAP
+#define PCI_BAR1_ENTRY_VALID 1
+#define PCI_BAR1_MASK ((PCI_BAR1_ENABLE_CA << 3) \
+ | (PCI_BAR1_ENDIAN_MODE << 1) \
+ | PCI_BAR1_ENTRY_VALID)
+
+/** Octeon Device state.
+ * Each octeon device goes through each of these states
+ * as it is initialized.
+ */
+#define OCT_DEV_BEGIN_STATE 0x0
+#define OCT_DEV_PCI_MAP_DONE 0x1
+#define OCT_DEV_DISPATCH_INIT_DONE 0x2
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x3
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4
+#define OCT_DEV_RESP_LIST_INIT_DONE 0x5
+#define OCT_DEV_DROQ_INIT_DONE 0x6
+#define OCT_DEV_IO_QUEUES_DONE 0x7
+#define OCT_DEV_CONSOLE_INIT_DONE 0x8
+#define OCT_DEV_HOST_OK 0x9
+#define OCT_DEV_CORE_OK 0xa
+#define OCT_DEV_RUNNING 0xb
+#define OCT_DEV_IN_RESET 0xc
+#define OCT_DEV_STATE_INVALID 0xd
+
+#define OCT_DEV_STATES OCT_DEV_STATE_INVALID
+
+/** Octeon Device interrupts
+ * These interrupt bits are set in the int_status field of
+ * the octeon_device structure
+ */
+#define OCT_DEV_INTR_DMA0_FORCE 0x01
+#define OCT_DEV_INTR_DMA1_FORCE 0x02
+#define OCT_DEV_INTR_PKT_DATA 0x04
+
+#define LIO_RESET_SECS (3)
+
+/*---------------------------DISPATCH LIST-------------------------------*/
+
+/** The dispatch list entry.
+ * The driver keeps a record of functions registered for each
+ * response header opcode in this structure. Since the opcode is
+ * hashed to index into the driver's list, more than one opcode
+ * can hash to the same entry, in which case the list field points
+ * to a linked list with the other entries.
+ */
+struct octeon_dispatch {
+ /** List head for this entry */
+ struct list_head list;
+
+ /** The opcode for which the dispatch function & arg should be used */
+ u16 opcode;
+
+ /** The function to be called for a packet received by the driver */
+ octeon_dispatch_fn_t dispatch_fn;
+
+ /* The application specified argument to be passed to the above
+ * function along with the received packet
+ */
+ void *arg;
+};
+
+/** The dispatch list structure. */
+struct octeon_dispatch_list {
+ /** access to dispatch list must be atomic */
+ spinlock_t lock;
+
+ /** Count of dispatch functions currently registered */
+ u32 count;
+
+ /** The list of dispatch functions */
+ struct octeon_dispatch *dlist;
+};
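+
+/* Lookup is hash-then-chain: the combined opcode selects the first-level
+ * slot, and colliding opcodes hang off that entry's list. A sketch of the
+ * indexing done in octeon_device.c:
+ *
+ *	u16 op = OPCODE_SUBCODE(opcode, subcode);
+ *	u32 idx = op & OCTEON_OPCODE_MASK;	(first-level slot)
+ */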
+
+/*----------------------- THE OCTEON DEVICE ---------------------------*/
+
+#define OCT_MEM_REGIONS 3
+/** PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octeon_mmio {
+ /** PCI address to which the BAR is mapped. */
+ u64 start;
+
+ /** Length of this PCI address space. */
+ u32 len;
+
+ /** Length that has been mapped to phys. address space. */
+ u32 mapped_len;
+
+ /** The kernel virtual address to which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /** Flag indicating the mapping was successful. */
+ u32 done;
+};
+
+#define MAX_OCTEON_MAPS 32
+
+struct octeon_io_enable {
+ u32 iq;
+ u32 oq;
+ u32 iq64B;
+};
+
+struct octeon_reg_list {
+ u32 __iomem *pci_win_wr_addr_hi;
+ u32 __iomem *pci_win_wr_addr_lo;
+ u64 __iomem *pci_win_wr_addr;
+
+ u32 __iomem *pci_win_rd_addr_hi;
+ u32 __iomem *pci_win_rd_addr_lo;
+ u64 __iomem *pci_win_rd_addr;
+
+ u32 __iomem *pci_win_wr_data_hi;
+ u32 __iomem *pci_win_wr_data_lo;
+ u64 __iomem *pci_win_wr_data;
+
+ u32 __iomem *pci_win_rd_data_hi;
+ u32 __iomem *pci_win_rd_data_lo;
+ u64 __iomem *pci_win_rd_data;
+};
+
+#define OCTEON_CONSOLE_MAX_READ_BYTES 512
+struct octeon_console {
+ u32 active;
+ u32 waiting;
+ u64 addr;
+ u32 buffer_size;
+ u64 input_base_addr;
+ u64 output_base_addr;
+ char leftover[OCTEON_CONSOLE_MAX_READ_BYTES];
+};
+
+struct octeon_board_info {
+ char name[OCT_BOARD_NAME];
+ char serial_number[OCT_SERIAL_LEN];
+ u64 major;
+ u64 minor;
+};
+
+struct octeon_fn_list {
+ void (*setup_iq_regs)(struct octeon_device *, u32);
+ void (*setup_oq_regs)(struct octeon_device *, u32);
+
+ irqreturn_t (*process_interrupt_regs)(void *);
+ int (*soft_reset)(struct octeon_device *);
+ int (*setup_device_regs)(struct octeon_device *);
+ void (*reinit_regs)(struct octeon_device *);
+ void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
+ void (*bar1_idx_write)(struct octeon_device *, u32, u32);
+ u32 (*bar1_idx_read)(struct octeon_device *, u32);
+ u32 (*update_iq_read_idx)(struct octeon_device *,
+ struct octeon_instr_queue *);
+
+ void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
+ void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
+
+ void (*enable_interrupt)(void *);
+ void (*disable_interrupt)(void *);
+
+ void (*enable_io_queues)(struct octeon_device *);
+ void (*disable_io_queues)(struct octeon_device *);
+};
+
+/* Must be a multiple of 8; changing it breaks the ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Structure for named memory blocks
+ * Number of descriptors
+ * available can be changed without affecting compatibility,
+ * but name length changes require a bump in the bootmem
+ * descriptor version
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /** Base address of named block */
+ u64 base_addr;
+
+ /** Size actually allocated for named block */
+ u64 size;
+
+ /** name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+struct oct_fw_info {
+ u32 max_nic_ports; /** max nic ports for the device */
+ u32 num_gmx_ports; /** num gmx ports */
+ u64 app_cap_flags; /** firmware cap flags */
+
+ /** The core application is running in this mode.
+ * See octeon-drv-opcodes.h for values.
+ */
+ u32 app_mode;
+ char liquidio_firmware_version[32];
+};
+
+/* wrappers around work structs */
+struct cavium_wk {
+ struct delayed_work work;
+ void *ctxptr;
+ size_t ctxul;
+};
+
+struct cavium_wq {
+ struct workqueue_struct *wq;
+ struct cavium_wk wk;
+};
+
+struct octdev_props {
+ /* Each interface in the Octeon device has a network
+ * device pointer (used for OS specific calls).
+ */
+ struct net_device *netdev;
+};
+
+/** The Octeon device.
+ * Each Octeon device has this structure to represent all its
+ * components.
+ */
+struct octeon_device {
+ /** Lock for PCI window configuration accesses */
+ spinlock_t pci_win_lock;
+
+ /** Lock for memory accesses */
+ spinlock_t mem_access_lock;
+
+ /** PCI device pointer */
+ struct pci_dev *pci_dev;
+
+ /** Chip specific information. */
+ void *chip;
+
+ /** Number of interfaces detected in this octeon device. */
+ u32 ifcount;
+
+ struct octdev_props props[MAX_OCTEON_LINKS];
+
+ /** Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /** This device's id - set by the driver. */
+ u32 octeon_id;
+
+ /** This device's PCIe port used for traffic. */
+ u16 pcie_port;
+
+ u16 flags;
+#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2)
+
+ /** The state of this device */
+ atomic_t status;
+
+ /** memory mapped io range */
+ struct octeon_mmio mmio[OCT_MEM_REGIONS];
+
+ struct octeon_reg_list reg_list;
+
+ struct octeon_fn_list fn_list;
+
+ struct octeon_board_info boardinfo;
+
+ u32 num_iqs;
+
+ /* The pool containing pre allocated buffers used for soft commands */
+ struct octeon_sc_buffer_pool sc_buf_pool;
+
+ /** The input instruction queues */
+ struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+
+ /** The doubly-linked list of instruction response */
+ struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
+
+ u32 num_oqs;
+
+ /** The DROQ output queues */
+ struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+
+ struct octeon_io_enable io_qmask;
+
+ /** List of dispatch functions */
+ struct octeon_dispatch_list dispatch;
+
+ /* Interrupt Moderation */
+ struct oct_intrmod_cfg intrmod;
+
+ u32 int_status;
+
+ u64 droq_intr;
+
+ /** Physical location of the cvmx_bootmem_desc_t in octeon memory */
+ u64 bootmem_desc_addr;
+
+ /** Placeholder memory for named blocks.
+ * Assumes single-threaded access
+ */
+ struct cvmx_bootmem_named_block_desc bootmem_named_block_desc;
+
+ /** Address of consoles descriptor */
+ u64 console_desc_addr;
+
+ /** Number of consoles available. 0 means they are inaccessible */
+ u32 num_consoles;
+
+ /* Console caches */
+ struct octeon_console console[MAX_OCTEON_MAPS];
+
+ /* Coprocessor clock rate. */
+ u64 coproc_clock_rate;
+
+ /** The core application is running in this mode. See liquidio_common.h
+ * for values.
+ */
+ u32 app_mode;
+
+ struct oct_fw_info fw_info;
+
+ /** The name given to this device. */
+ char device_name[32];
+
+ /** Application Context */
+ void *app_ctx;
+
+ struct cavium_wq dma_comp_wq;
+
+ struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+
+ struct cavium_wk nic_poll_work;
+
+ struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
+
+ void *priv;
+};
+
+#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
+ (oct->chip_id == OCTEON_CN68XX))
+#define CHIP_FIELD(oct, TYPE, field) \
+ (((struct octeon_ ## TYPE *)(oct->chip))->field)
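+
+/* Example from this driver: chip-specific data access is guarded by the
+ * chip-id check, as in octeon_get_conf():
+ *
+ *	if (OCTEON_CN6XXX(oct))
+ *		conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+ */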
+
+struct oct_intrmod_cmd {
+ struct octeon_device *oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cfg *cfg;
+};
+
+/*------------------ Function Prototypes ----------------------*/
+
+/** Initialize device list memory */
+void octeon_init_device_list(int conf_type);
+
+/** Free memory for Input and Output queue structures for an octeon device */
+void octeon_free_device_mem(struct octeon_device *);
+
+/* Look up a free entry in the octeon_device table and allocate resources
+ * for the octeon_device structure for an octeon device. Called at init
+ * time.
+ */
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size);
+
+/** Initialize the driver's dispatch list which is a mix of a hash table
+ * and a linked list. This is done at driver load time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return 0 on success, else a negative error value
+ */
+int octeon_init_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Delete the driver's dispatch list and all registered entries.
+ * This is done at driver unload time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ */
+void octeon_delete_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Initialize the core device fields with the info returned by the FW.
+ * @param recv_info - Receive info structure
+ * @param buf - Receive buffer
+ */
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf);
+
+/** Gets the dispatch function registered to receive packets with a
+ * given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch function
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch function
+ * is to be checked.
+ *
+ * @return Success: octeon_dispatch_fn_t (dispatch function pointer)
+ * @return Failure: NULL
+ *
+ * Looks up the dispatch list to get the dispatch function for a
+ * given opcode.
+ */
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode);
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id);
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev);
+
+static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
+{
+ return oct->rev_id & 0x3;
+}
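+
+/* Example: rev_id 0x06 decodes as major ((0x6 & 0xC) >> 2) == 1 and
+ * minor (0x6 & 0x3) == 2, i.e. pass "1.2"; a raw major value of 0 is
+ * reported as 1.
+ */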
+
+/** Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr);
+
+/** Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param val - Value to write
+ * @param addr - Address of the register to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
+
+/* Routines for reading and writing CSRs */
+#define octeon_write_csr(oct_dev, reg_off, value) \
+ writel(value, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_write_csr64(oct_dev, reg_off, val64) \
+ writeq(val64, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_read_csr(oct_dev, reg_off) \
+ readl(oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_read_csr64(oct_dev, reg_off) \
+ readq(oct_dev->mmio[0].hw_addr + reg_off)
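+
+/* Example: set one enable bit in a 32-bit CSR, mirroring
+ * octeon_set_droq_pkt_op() in octeon_device.c:
+ *
+ *	u32 v = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ *	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, v | (1 << q_no));
+ */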
+
+/**
+ * Checks if memory access is okay
+ *
+ * @param oct which octeon to send to
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_mem_access_ok(struct octeon_device *oct);
+
+/**
+ * Waits for DDR initialization.
+ *
+ * @param oct which octeon to send to
+ * @param timeout_in_ms pointer to how long to wait until DDR is initialized
+ * in ms.
+ * If contents are 0, it waits until contents are non-zero
+ * before starting to check.
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_wait_for_ddr_init(struct octeon_device *oct,
+ u32 *timeout_in_ms);
+
+/**
+ * Wait for u-boot to boot and be waiting for a command.
+ *
+ * @param wait_time_hundredths
+ * Maximum time to wait
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths);
+
+/**
+ * Initialize console access
+ *
+ * @param oct which octeon initialize
+ * @return Zero on success, negative on failure.
+ */
+int octeon_init_consoles(struct octeon_device *oct);
+
+/**
+ * Adds access to a console to the device.
+ *
+ * @param oct which octeon to add to
+ * @param console_num which console
+ * @return Zero on success, negative on failure.
+ */
+int octeon_add_console(struct octeon_device *oct, u32 console_num);
+
+/** write or read from a console */
+int octeon_console_write(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 write_request_size, u32 flags);
+int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size, u32 flags);
+int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
+
+/** Removes all attached consoles. */
+void octeon_remove_consoles(struct octeon_device *oct);
+
+/**
+ * Send a string to u-boot on console 0 as a command.
+ *
+ * @param oct which octeon to send to
+ * @param cmd_str String to send
+ * @param wait_hundredths Time to wait for u-boot to accept the command.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths);
+
+/** Parses, validates, and downloads firmware, then boots associated cores.
+ * @param oct which octeon to download firmware to
+ * @param data - The complete firmware file image
+ * @param size - The size of the data
+ *
+ * @return 0 if success.
+ * -EINVAL if file is incompatible or badly formatted.
+ * -ENODEV if no handler was found for the application type or an
+ * invalid octeon id was passed.
+ */
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size);
+
+char *lio_get_state_string(atomic_t *state_ptr);
+
+/** Sets up instruction queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_instr_queues(struct octeon_device *oct);
+
+/** Sets up output queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_output_queues(struct octeon_device *oct);
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
+
+/** Turns off the input and output queues for the device
+ * @param oct which octeon to disable
+ */
+void octeon_set_io_queues_off(struct octeon_device *oct);
+
+/** Turns on or off the given output queue for the device
+ * @param oct which octeon to change
+ * @param q_no which queue
+ * @param enable 1 to enable, 0 to disable
+ */
+void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
+
+/** Retrieve the config for the device
+ * @param oct which octeon
+ * @param card_type type of card
+ *
+ * @returns pointer to configuration
+ */
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
+
+/** Gets the octeon device configuration
+ * @return - pointer to the octeon configuration structure
+ */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
new file mode 100644
index 000000000000..94b502a0cf33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -0,0 +1,989 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/* #define CAVIUM_ONLY_PERF_MODE */
+
+#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
+#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
+
+struct niclist {
+ struct list_head list;
+ void *ptr;
+};
+
+struct __dispatch {
+ struct list_head list;
+ struct octeon_recv_info *rinfo;
+ octeon_dispatch_fn_t disp_fn;
+};
+
+/** Get the argument that the user set when registering dispatch
+ * function for a given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch argument
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch argument
+ * is to be checked.
+ * @return Success: void * (argument to the dispatch function)
+ * @return Failure: NULL
+ *
+ */
+static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
+ u16 opcode, u16 subcode)
+{
+ int idx;
+ struct list_head *dispatch;
+ void *fn_arg = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn_arg = octeon_dev->dispatch.dlist[idx].arg;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn_arg = ((struct octeon_dispatch *)
+ dispatch)->arg;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn_arg;
+}
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 pkt_count = 0;
+
+ pkt_count = readl(droq->pkts_sent_reg);
+ if (pkt_count) {
+ atomic_add(pkt_count, &droq->pkts_pending);
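+ /* Writing the count back clears those packets from the
+ * register, so they are not counted again on the next poll.
+ */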
+ writel(pkt_count, droq->pkts_sent_reg);
+ }
+
+ return pkt_count;
+}
+
+static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
+{
+ u32 count = 0;
+
+ /* max_empty_descs is the max. no. of descs that can have no buffers.
+ * If the empty desc count goes beyond this value, we cannot safely
+ * read in a 64K packet sent by Octeon
+ * (64K is max pkt size from Octeon)
+ */
+ droq->max_empty_descs = 0;
+
+ do {
+ droq->max_empty_descs++;
+ count += droq->buffer_size;
+ } while (count < (64 * 1024));
+
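+ /* The loop above computed ceil(64K / buffer_size): the number of
+ * descriptors a maximum-sized packet consumes. Everything beyond
+ * that may safely sit empty.
+ */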
+ droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
+}
+
+static void octeon_droq_reset_indices(struct octeon_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ atomic_set(&droq->pkts_pending, 0);
+}
+
+static void
+octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+
+ for (i = 0; i < droq->max_count; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ if (droq->desc_ring) {
+ lio_unmap_ring_info(oct->pci_dev,
+ (u64)droq->
+ desc_ring[i].info_ptr,
+ OCT_DROQ_INFO_SIZE);
+ lio_unmap_ring(oct->pci_dev,
+ (u64)droq->desc_ring[i].
+ buffer_ptr,
+ droq->buffer_size);
+ }
+ recv_buffer_free(droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ octeon_droq_reset_indices(droq);
+}
+
+static int
+octeon_droq_setup_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+ void *buf;
+ struct octeon_droq_desc *desc_ring = droq->desc_ring;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+
+ if (!buf) {
+ dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->recv_buf_list[i].data = get_rbd(buf);
+
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(oct->pci_dev,
+ droq->recv_buf_list[i].buffer,
+ droq->buffer_size);
+ }
+
+ octeon_droq_reset_indices(droq);
+
+ octeon_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
+{
+ struct octeon_droq *droq = oct->droq[q_no];
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ octeon_droq_destroy_ring_buffers(oct, droq);
+
+ vfree(droq->recv_buf_list);
+
+ if (droq->info_base_addr)
+ cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
+ droq->info_alloc_size,
+ droq->info_base_addr,
+ droq->info_list_dma);
+
+ if (droq->desc_ring)
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ return 0;
+}
+
+int octeon_init_droq(struct octeon_device *oct,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx)
+{
+ struct octeon_droq *droq;
+ u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
+ u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ droq = oct->droq[q_no];
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ droq->oct_dev = oct;
+ droq->q_no = q_no;
+ if (app_ctx)
+ droq->app_ctx = app_ctx;
+ else
+ droq->app_ctx = (void *)(size_t)q_no;
+
+ c_num_descs = num_descs;
+ c_buf_size = desc_size;
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ }
+
+ droq->max_count = c_num_descs;
+ droq->buffer_size = c_buf_size;
+
+ desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
+
+ if (!droq->desc_ring) {
+ dev_err(&oct->pci_dev->dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, droq->desc_ring_dma);
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->max_count);
+
+ droq->info_list =
+ cnnic_alloc_aligned_dma(oct->pci_dev,
+ (droq->max_count * OCT_DROQ_INFO_SIZE),
+ &droq->info_alloc_size,
+ &droq->info_base_addr,
+ &droq->info_list_dma);
+
+ if (!droq->info_list) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+ return 1;
+ }
+
+ droq->recv_buf_list = (struct octeon_recv_buffer *)
+ vmalloc(droq->max_count *
+ OCT_DROQ_RECVBUF_SIZE);
+ if (!droq->recv_buf_list) {
+ dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (octeon_droq_setup_ring_buffers(oct, droq))
+ goto init_droq_fail;
+
+ droq->pkts_per_intr = c_pkts_per_intr;
+ droq->refill_threshold = c_refill_threshold;
+
+ dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
+ droq->max_empty_descs);
+
+ spin_lock_init(&droq->lock);
+
+ INIT_LIST_HEAD(&droq->dispatch_list);
+
+ /* For 56xx Pass1, this function won't be called, so no checks. */
+ oct->fn_list.setup_oq_regs(oct, q_no);
+
+ oct->io_qmask.oq |= (1 << q_no);
+
+ return 0;
+
+init_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return 1;
+}
+
+/* octeon_create_recv_info
+ * Parameters:
+ * octeon_dev - pointer to the octeon device structure
+ * droq - droq in which the packet arrived.
+ * buf_cnt - no. of buffers used by the packet.
+ * idx - index in the descriptor for the first buffer in the packet.
+ * Description:
+ * Allocates a recv_info_t and copies the buffer addresses for packet data
+ * into the recv_pkt space which starts at an 8B offset from recv_info_t.
+ * Flags the descriptors for refill later. If available descriptors go
+ * below the threshold to receive a 64K pkt, new buffers are first allocated
+ * before the recv_pkt_t is created.
+ * This routine will be called in interrupt context.
+ * Returns:
+ * Success: Pointer to recv_info_t
+ * Failure: NULL.
+ * Locks:
+ * The droq->lock is held when this routine is called.
+ */
+static inline struct octeon_recv_info *octeon_create_recv_info(
+ struct octeon_device *octeon_dev,
+ struct octeon_droq *droq,
+ u32 buf_cnt,
+ u32 idx)
+{
+ struct octeon_droq_info *info;
+ struct octeon_recv_pkt *recv_pkt;
+ struct octeon_recv_info *recv_info;
+ u32 i, bytes_left;
+
+ info = &droq->info_list[idx];
+
+ recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
+ if (!recv_info)
+ return NULL;
+
+ recv_pkt = recv_info->recv_pkt;
+ recv_pkt->rh = info->rh;
+ recv_pkt->length = (u32)info->length;
+ recv_pkt->buffer_count = (u16)buf_cnt;
+ recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;
+
+ i = 0;
+ bytes_left = (u32)info->length;
+
+ while (buf_cnt) {
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)droq->desc_ring[idx].buffer_ptr,
+ droq->buffer_size);
+
+ recv_pkt->buffer_size[i] =
+ (bytes_left >=
+ droq->buffer_size) ? droq->buffer_size : bytes_left;
+
+ recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
+ droq->recv_buf_list[idx].buffer = NULL;
+
+ INCR_INDEX_BY1(idx, droq->max_count);
+ bytes_left -= droq->buffer_size;
+ i++;
+ buf_cnt--;
+ }
+
+ return recv_info;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline u32
+octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
+ struct octeon_droq_desc *desc_ring)
+{
+ u32 desc_refilled = 0;
+
+ u32 refill_index = droq->refill_idx;
+
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ droq->recv_buf_list[droq->refill_idx].data =
+ droq->recv_buf_list[refill_index].data;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ INCR_INDEX_BY1(droq->refill_idx,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].
+ buffer);
+ }
+ INCR_INDEX_BY1(refill_index, droq->max_count);
+ } /* while */
+ return desc_refilled;
+}
+
+/* octeon_droq_refill
+ * Parameters:
+ * droq - droq in which descriptors require new buffers.
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ * Returns:
+ * No of descriptors refilled.
+ * Locks:
+ * This routine is called with droq->lock held.
+ */
+static u32
+octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
+{
+ struct octeon_droq_desc *desc_ring;
+ void *buf = NULL;
+ u8 *data;
+ u32 desc_refilled = 0;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->max_count)) {
+ /* If a valid buffer exists (happens if there is no dispatch),
+ * reuse the buffer, else allocate.
+ */
+ if (!droq->recv_buf_list[droq->refill_idx].buffer) {
+ buf = recv_buffer_alloc(octeon_dev, droq->q_no,
+ droq->buffer_size);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (!buf)
+ break;
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ buf;
+ data = get_rbd(buf);
+ } else {
+ data = get_rbd(droq->recv_buf_list
+ [droq->refill_idx].buffer);
+ }
+
+ droq->recv_buf_list[droq->refill_idx].data = data;
+
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(octeon_dev->pci_dev,
+ droq->recv_buf_list[droq->
+ refill_idx].buffer,
+ droq->buffer_size);
+
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled +=
+ octeon_droq_refill_pullup_descs(droq, desc_ring);
+
+ /* If droq->refill_count is still non-zero here, a second pass would
+ * not change it: the pullup above only moved buffers to close gaps
+ * in the ring, so the same number of descriptors still needs fresh
+ * buffers.
+ */
+ return desc_refilled;
+}
+
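+/* Returns ceil(total_len / buf_size): the number of ring buffers that a
+ * packet of total_len bytes occupies.
+ */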
+static inline u32
+octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
+{
+ u32 buf_cnt = 0;
+
+ while (total_len > (buf_size * buf_cnt))
+ buf_cnt++;
+ return buf_cnt;
+}
+
+static int
+octeon_droq_dispatch_pkt(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ union octeon_rh *rh,
+ struct octeon_droq_info *info)
+{
+ u32 cnt;
+ octeon_dispatch_fn_t disp_fn;
+ struct octeon_recv_info *rinfo;
+
+ cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);
+
+ disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
+ (u16)rh->r.subcode);
+ if (disp_fn) {
+ rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
+ if (rinfo) {
+ struct __dispatch *rdisp = rinfo->rsvd;
+
+ rdisp->rinfo = rinfo;
+ rdisp->disp_fn = disp_fn;
+ rinfo->recv_pkt->rh = *rh;
+ list_add_tail(&rdisp->list,
+ &droq->dispatch_list);
+ } else {
+ droq->stats.dropped_nomem++;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+ droq->stats.dropped_nodispatch++;
+ } /* else (dispatch_fn ... */
+
+ return cnt;
+}
+
+static inline void octeon_droq_drop_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 cnt)
+{
+ u32 i = 0, buf_cnt;
+ struct octeon_droq_info *info;
+
+ for (i = 0; i < cnt; i++) {
+ info = &droq->info_list[droq->read_idx];
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (info->length) {
+ info->length -= OCT_RH_SIZE;
+ droq->stats.bytes_received += info->length;
+ buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
+ (u32)info->length);
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
+ buf_cnt = 1;
+ }
+
+ INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ }
+}
+
+static u32
+octeon_droq_fast_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 pkts_to_process)
+{
+ struct octeon_droq_info *info;
+ union octeon_rh *rh;
+ u32 pkt, total_len = 0, pkt_count;
+
+ pkt_count = pkts_to_process;
+
+ for (pkt = 0; pkt < pkt_count; pkt++) {
+ u32 pkt_len = 0;
+ struct sk_buff *nicbuf = NULL;
+
+ info = &droq->info_list[droq->read_idx];
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (!info->length) {
+ dev_err(&oct->pci_dev->dev,
+ "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ droq->q_no, droq->read_idx, pkt_count);
+ print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
+ (u8 *)info,
+ OCT_DROQ_INFO_SIZE);
+ break;
+ }
+
+ /* The length of the resp hdr is included in the received data len. */
+ info->length -= OCT_RH_SIZE;
+ rh = &info->rh;
+
+ total_len += (u32)info->length;
+
+ if (OPCODE_SLOW_PATH(rh)) {
+ u32 buf_cnt;
+
+ buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
+ INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ lio_unmap_ring(oct->pci_dev,
+ (u64)droq->desc_ring[
+ droq->read_idx].buffer_ptr,
+ droq->buffer_size);
+ pkt_len = (u32)info->length;
+ nicbuf = droq->recv_buf_list[
+ droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+ INCR_INDEX_BY1(droq->read_idx, droq->max_count);
+ skb_put(nicbuf, pkt_len);
+ droq->refill_count++;
+ } else {
+ nicbuf = octeon_fast_packet_alloc(oct, droq,
+ droq->q_no,
+ (u32)
+ info->length);
+ pkt_len = 0;
+ /* nicbuf allocation can fail. We'll handle it
+ * inside the loop.
+ */
+ while (pkt_len < info->length) {
+ int cpy_len;
+
+ cpy_len = ((pkt_len +
+ droq->buffer_size) >
+ info->length) ?
+ ((u32)info->length - pkt_len) :
+ droq->buffer_size;
+
+ if (nicbuf) {
+ lio_unmap_ring(oct->pci_dev,
+ (u64)
+ droq->desc_ring
+ [droq->read_idx].
+ buffer_ptr,
+ droq->
+ buffer_size);
+ octeon_fast_packet_next(droq,
+ nicbuf,
+ cpy_len,
+ droq->
+ read_idx
+ );
+ }
+
+ pkt_len += cpy_len;
+ INCR_INDEX_BY1(droq->read_idx,
+ droq->max_count);
+ droq->refill_count++;
+ }
+ }
+
+ if (nicbuf) {
+ if (droq->ops.fptr)
+ droq->ops.fptr(oct->octeon_id,
+ nicbuf, pkt_len,
+ rh, &droq->napi);
+ else
+ recv_buffer_free(nicbuf);
+ }
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = octeon_droq_refill(oct, droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel((desc_refilled), droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
+
+ } /* for ( each packet )... */
+
+ /* Update the stats with the number of packets and bytes processed. */
+ droq->stats.pkts_received += pkt;
+ droq->stats.bytes_received += total_len;
+
+ if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
+ octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
+
+ droq->stats.dropped_toomany += (pkts_to_process - pkt);
+ return pkts_to_process;
+ }
+
+ return pkt;
+}
+
+int
+octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget)
+{
+ u32 pkt_count = 0, pkts_processed = 0;
+ struct list_head *tmp, *tmp2;
+
+ pkt_count = atomic_read(&droq->pkts_pending);
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ /* Grab the lock */
+ spin_lock(&droq->lock);
+
+ pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
+
+ atomic_sub(pkts_processed, &droq->pkts_pending);
+
+ /* Release the spin lock */
+ spin_unlock(&droq->lock);
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+ /* If there are packets still pending, schedule the tasklet again */
+ if (atomic_read(&droq->pkts_pending))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Utility function to poll for packets. octeon_droq_check_hw_for_pkts()
+ * must be called before calling this routine.
+ */
+static int
+octeon_droq_process_poll_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq, u32 budget)
+{
+ struct list_head *tmp, *tmp2;
+ u32 pkts_available = 0, pkts_processed = 0;
+ u32 total_pkts_processed = 0;
+
+ if (budget > droq->max_count)
+ budget = droq->max_count;
+
+ spin_lock(&droq->lock);
+
+ while (total_pkts_processed < budget) {
+ pkts_available =
+ CVM_MIN((budget - total_pkts_processed),
+ (u32)(atomic_read(&droq->pkts_pending)));
+
+ if (pkts_available == 0)
+ break;
+
+ pkts_processed =
+ octeon_droq_fast_process_packets(oct, droq,
+ pkts_available);
+
+ atomic_sub(pkts_processed, &droq->pkts_pending);
+
+ total_pkts_processed += pkts_processed;
+
+ octeon_droq_check_hw_for_pkts(oct, droq);
+ }
+
+ spin_unlock(&droq->lock);
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+ return total_pkts_processed;
+}
+
+int
+octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
+ u32 arg)
+{
+ struct octeon_droq *droq;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (cmd == POLL_EVENT_PROCESS_PKTS)
+ return octeon_droq_process_poll_pkts(oct, droq, arg);
+
+ if (cmd == POLL_EVENT_PENDING_PKTS) {
+ u32 pkt_cnt = atomic_read(&droq->pkts_pending);
+
+ return octeon_droq_process_packets(oct, droq, pkt_cnt);
+ }
+
+ if (cmd == POLL_EVENT_ENABLE_INTR) {
+ u32 value;
+ unsigned long flags;
+
+ /* Enable Pkt Interrupt */
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ spin_lock_irqsave
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ value =
+ octeon_read_csr(oct,
+ CN6XXX_SLI_PKT_TIME_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_PKT_TIME_INT_ENB,
+ value);
+ value =
+ octeon_read_csr(oct,
+ CN6XXX_SLI_PKT_CNT_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_PKT_CNT_INT_ENB,
+ value);
+
+ /* don't bother flushing the enables */
+
+ spin_unlock_irqrestore
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ return 0;
+ }
+ break;
+ }
+
+ return 0;
+ }
+
+ dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
+ return -EINVAL;
+}
+
+int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
+ struct octeon_droq_ops *ops)
+{
+ struct octeon_droq *droq;
+ unsigned long flags;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (!(ops)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ spin_lock_irqsave(&droq->lock, flags);
+
+ memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
+
+ spin_unlock_irqrestore(&droq->lock, flags);
+
+ return 0;
+}
+
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
+{
+ unsigned long flags;
+ struct octeon_droq *droq;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, oct->num_oqs - 1);
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (!droq) {
+ dev_info(&oct->pci_dev->dev,
+ "Droq id (%d) not available.\n", q_no);
+ return 0;
+ }
+
+ spin_lock_irqsave(&droq->lock, flags);
+
+ droq->ops.fptr = NULL;
+ droq->ops.drop_on_max = 0;
+
+ spin_unlock_irqrestore(&droq->lock, flags);
+
+ return 0;
+}
+
+int octeon_create_droq(struct octeon_device *oct,
+ u32 q_no, u32 num_descs,
+ u32 desc_size, void *app_ctx)
+{
+ struct octeon_droq *droq;
+
+ if (oct->droq[q_no]) {
+ dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
+ q_no);
+ return 1;
+ }
+
+ /* Allocate the DS for the new droq. */
+ droq = vzalloc(sizeof(*droq));
+ if (!droq)
+ return -1;
+
+ /* Disable the pkt o/p for this Q */
+ octeon_set_droq_pkt_op(oct, q_no, 0);
+ oct->droq[q_no] = droq;
+
+ /* Initialize the Droq; bail out through the common cleanup on failure. */
+ if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx))
+ goto create_droq_fail;
+
+ oct->num_oqs++;
+
+ dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
+ oct->num_oqs);
+
+ /* Global Droq register settings */
+
+ /* As of now not required, as settings are done for all 32 Droqs at
+ * the same time.
+ */
+ return 0;
+
+create_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
new file mode 100644
index 000000000000..7940ccee12d9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -0,0 +1,426 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_droq.h
+ * \brief Implementation of Octeon Output queues. "Output" is with
+ * respect to the Octeon device on the NIC. From this driver's point of
+ * view they are ingress queues.
+ */
+
+#ifndef __OCTEON_DROQ_H__
+#define __OCTEON_DROQ_H__
+
+/* Default number of packets that will be processed in one iteration. */
+#define MAX_PACKET_BUDGET 0xFFFFFFFF
+
+/** Octeon descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of an octeon_droq_info structure.
+ * The Octeon device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct octeon_droq_desc {
+ /** The buffer pointer */
+ u64 buffer_ptr;
+
+ /** The Info pointer */
+ u64 info_ptr;
+};
+
+#define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct octeon_droq_info {
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+
+ /** The Length of the packet. */
+ u64 length;
+};
+
+#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+ */
+struct octeon_recv_buffer {
+ /** Packet buffer, including metadata. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ u8 *data;
+};
+
+#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
+
+/** Output Queue statistics. Each output queue has four stats fields. */
+struct oct_droq_stats {
+ /** Number of packets received in this queue. */
+ u64 pkts_received;
+
+ /** Bytes received by this queue. */
+ u64 bytes_received;
+
+ /** Packets dropped due to no dispatch function. */
+ u64 dropped_nodispatch;
+
+ /** Packets dropped due to no memory available. */
+ u64 dropped_nomem;
+
+ /** Packets dropped due to large number of pkts to process. */
+ u64 dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ u64 rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ u64 rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ u64 rx_dropped;
+};
+
+#define POLL_EVENT_INTR_ARRIVED 1
+#define POLL_EVENT_PROCESS_PKTS 2
+#define POLL_EVENT_PENDING_PKTS 3
+#define POLL_EVENT_ENABLE_INTR 4
+
+/* The maximum number of buffers that can be dispatched from the
+ * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that
+ * max packet size from DROQ is 64K.
+ */
+#define MAX_RECV_BUFS 64
+
+/** Receive Packet format used when dispatching output queue packets
+ * with non-raw opcodes.
+ * The received packet will be sent to the upper layers using this
+ * structure which is passed as a parameter to the dispatch function
+ */
+struct octeon_recv_pkt {
+ /** Number of buffers in this received packet */
+ u16 buffer_count;
+
+ /** Id of the device that is sending the packet up */
+ u16 octeon_id;
+
+ /** Length of data in the packet buffer */
+ u32 length;
+
+ /** The receive header */
+ union octeon_rh rh;
+
+ /** Pointer to the OS-specific packet buffer */
+ void *buffer_ptr[MAX_RECV_BUFS];
+
+ /** Size of the buffers pointed to by ptr's in buffer_ptr */
+ u32 buffer_size[MAX_RECV_BUFS];
+};
+
+#define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt))
+
+/** The first parameter of a dispatch function.
+ * For a raw mode opcode, the driver dispatches with the device
+ * pointer in this structure.
+ * For non-raw mode opcode, the driver dispatches the recv_pkt
+ * created to contain the buffers with data received from Octeon.
+ * ---------------------
+ * | *recv_pkt ----|---
+ * |-------------------| |
+ * | 0 or more bytes | |
+ * | reserved by driver| |
+ * |-------------------|<-/
+ * | octeon_recv_pkt |
+ * | |
+ * |___________________|
+ */
+struct octeon_recv_info {
+ void *rsvd;
+ struct octeon_recv_pkt *recv_pkt;
+};
+
+#define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info))
+
+/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info
+ * structure is filled in before this call returns.
+ * @param extra_bytes - extra bytes to be allocated at the end of the recv info
+ * structure.
+ * @return - pointer to a newly allocated recv_info structure.
+ */
+static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes)
+{
+ struct octeon_recv_info *recv_info;
+ u8 *buf;
+
+ buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE +
+ extra_bytes, GFP_ATOMIC);
+ if (!buf)
+ return NULL;
+
+ recv_info = (struct octeon_recv_info *)buf;
+ recv_info->recv_pkt =
+ (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE);
+ recv_info->rsvd = NULL;
+ if (extra_bytes)
+ recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE;
+
+ return recv_info;
+}
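+
+/* Usage sketch (illustrative only): the allocation pairs with
+ * octeon_free_recv_info() below.
+ *
+ *	struct octeon_recv_info *rinfo = octeon_alloc_recv_info(0);
+ *
+ *	if (rinfo) {
+ *		... fill rinfo->recv_pkt with buffers from the DROQ ...
+ *		octeon_free_recv_info(rinfo);
+ *	}
+ */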
+
+/** Free a recv_info structure.
+ * @param recv_info - Pointer to receive_info to be freed
+ */
+static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info)
+{
+ kfree(recv_info);
+}
+
+typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *);
+
+/** Used by NIC module to register packet handler and to get device
+ * information for each octeon device.
+ */
+struct octeon_droq_ops {
+ /** This registered function will be called by the driver with
+ * the octeon id, pointer to buffer from droq, length of data in
+ * the buffer, the receive header, and the argument registered by
+ * the caller. The receive header gives the port number to the
+ * caller. Function pointer is set by caller.
+ */
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+
+ /* This function will be called by the driver for all NAPI related
+ * events. It is passed a single driver-supplied context pointer.
+ */
+ void (*napi_fn)(void *);
+
+ u32 poll_mode;
+
+ /** Flag indicating if the DROQ handler should drop packets that
+ * it cannot handle in one iteration. Set by caller.
+ */
+ u32 drop_on_max;
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon DROQ.
+ */
+struct octeon_droq {
+ /** A spinlock to protect access to this ring. */
+ spinlock_t lock;
+
+ u32 q_no;
+
+ struct octeon_droq_ops ops;
+
+ struct octeon_device *oct_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct octeon_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ u32 read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ u32 write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ u32 refill_idx;
+
+ /** Packets pending to be processed */
+ atomic_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ u32 max_count;
+
+ /** The number of descriptors pending refill. */
+ u32 refill_count;
+
+ u32 pkts_per_intr;
+ u32 refill_threshold;
+
+ /** The max number of descriptors in DROQ without a buffer.
+ * This field is used to keep track of empty space threshold. If the
+ * refill_count reaches this value, the DROQ cannot accept a max-sized
+ * (64K) packet.
+ */
+ u32 max_empty_descs;
+
+ /** The 8B aligned info ptrs begin from this address. */
+ struct octeon_droq_info *info_list;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct octeon_recv_buffer *recv_buf_list;
+
+ /** The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void __iomem *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void __iomem *pkts_sent_reg;
+
+ struct list_head dispatch_list;
+
+ /** Statistics for this DROQ. */
+ struct oct_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** The info ptr list is allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /** DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /** Allocated size of info list. */
+ u32 info_alloc_size;
+
+ /** application context */
+ void *app_ctx;
+
+ struct napi_struct napi;
+
+ u32 cpu_id;
+
+ struct call_single_data csd;
+};
+
+#define OCT_DROQ_SIZE (sizeof(struct octeon_droq))
+
+/**
+ * Allocates space for the descriptor ring for the droq and sets the
+ * base addr, number of descriptors etc. in Octeon registers.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @param num_descs - number of descriptors in the ring
+ * @param desc_size - size of the buffer associated with each descriptor
+ * @param app_ctx - pointer to application context
+ * @return Success: 0 Failure: 1
+ */
+int octeon_init_droq(struct octeon_device *oct_dev,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx);
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @return: Success: 0 Failure: 1
+ */
+int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
+
+/** Register a change in droq operations. The ops field has a pointer to a
+ * function which will be called by the DROQ handler for all packets arriving
+ * on output queues given by q_no irrespective of the type of packet.
+ * The ops field also has a flag which if set tells the DROQ handler to
+ * drop packets if it receives more than what it can process in one
+ * invocation of the handler.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @param ops - the droq_ops settings for this queue
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int
+octeon_register_droq_ops(struct octeon_device *oct,
+ u32 q_no,
+ struct octeon_droq_ops *ops);
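+
+/* Registration sketch (illustrative, not part of this header's API):
+ * a NIC module fills in a struct octeon_droq_ops and registers it per
+ * queue; "my_rx_handler" is a hypothetical handler.
+ *
+ *	struct octeon_droq_ops ops;
+ *
+ *	memset(&ops, 0, sizeof(ops));
+ *	ops.fptr = my_rx_handler;
+ *	ops.drop_on_max = 1;
+ *	octeon_register_droq_ops(oct, q_no, &ops);
+ */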
+
+/** Resets the function pointer and flag settings made by
+ * octeon_register_droq_ops(). After this routine is called, the DROQ handler
+ * will lookup dispatch function for each arriving packet on the output queue
+ * given by q_no.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
+
+/** Register a dispatch function for an opcode/subcode. The driver will call
+ * this dispatch function when it receives a packet with the given
+ * opcode/subcode in its output queues along with the user specified
+ * argument.
+ * @param oct - the octeon device to register with.
+ * @param opcode - the opcode for which the dispatch will be registered.
+ * @param subcode - the subcode for which the dispatch will be registered
+ * @param fn - the dispatch function.
+ * @param fn_arg - user-specified argument that will be passed along with the
+ * dispatch function by the driver.
+ * @return Success: 0; Failure: 1
+ */
+int octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg);
+
+/** Remove registration for an opcode/subcode. This will delete the mapping for
+ * an opcode/subcode. The dispatch function will be unregistered and will no
+ * longer be called if a packet with the opcode/subcode arrives in the driver
+ * output queues.
+ * @param oct - the octeon device to unregister from.
+ * @param opcode - the opcode to be unregistered.
+ * @param subcode - the subcode to be unregistered.
+ *
+ * @return Success: 0; Failure: 1
+ */
+int octeon_unregister_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode);
+
+void octeon_droq_print_stats(void);
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq);
+
+int octeon_create_droq(struct octeon_device *oct, u32 q_no,
+ u32 num_descs, u32 desc_size, void *app_ctx);
+
+int octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget);
+
+int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
+ int cmd, u32 arg);
+
+#endif /*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
new file mode 100644
index 000000000000..592fe49b589d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -0,0 +1,319 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_iq.h
+ * \brief Host Driver: Implementation of Octeon input queues. "Input" is
+ * with respect to the Octeon device on the NIC. From this driver's
+ * point of view they are egress queues.
+ */
+
+#ifndef __OCTEON_IQ_H__
+#define __OCTEON_IQ_H__
+
+#define IQ_STATUS_RUNNING 1
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+/*------------------------- INSTRUCTION QUEUE --------------------------*/
+
+/* \cond */
+
+#define REQTYPE_NONE 0
+#define REQTYPE_NORESP_NET 1
+#define REQTYPE_NORESP_NET_SG 2
+#define REQTYPE_RESP_NET 3
+#define REQTYPE_RESP_NET_SG 4
+#define REQTYPE_SOFT_COMMAND 5
+#define REQTYPE_LAST 5
+
+struct octeon_request_list {
+ u32 reqtype;
+ void *buf;
+};
+
+/* \endcond */
+
+/** Input Queue statistics. Each input queue has its own set of stats. */
+struct oct_iq_stats {
+ u64 instr_posted; /**< Instructions posted to this queue. */
+ u64 instr_processed; /**< Instructions processed in this queue. */
+ u64 instr_dropped; /**< Instructions that could not be processed. */
+ u64 bytes_sent; /**< Bytes sent through this queue. */
+ u64 sgentry_sent; /**< Gather entries sent through this queue. */
+ u64 tx_done; /**< Number of packets sent to the network. */
+ u64 tx_iq_busy; /**< Number of times this iq was found to be full. */
+ u64 tx_dropped; /**< Number of pkts dropped due to xmit path errors. */
+ u64 tx_tot_bytes; /**< Total count of bytes sent to the network. */
+};
+
+#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4)
+ * of an Octeon device has one such structure to represent it.
+ */
+struct octeon_instr_queue {
+ /** A spinlock to protect access to the input ring. */
+ spinlock_t lock;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ u32 iqcmd_64B:1;
+
+ /** Queue Number. */
+ u32 iq_no:5;
+
+ u32 rsvd:17;
+
+ /* Controls the periodic flushing of iq */
+ u32 do_auto_flush:1;
+
+ u32 status:8;
+
+ /** Maximum no. of instructions in this queue. */
+ u32 max_count;
+
+ /** Index in input ring where the driver should write the next packet */
+ u32 host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ u32 octeon_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u32 flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ u32 reset_instr_cnt;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ u8 *base_addr;
+
+ struct octeon_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void __iomem *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void __iomem *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /** The max. number of instructions that can be held pending by the
+ * driver.
+ */
+ u32 fill_threshold;
+
+ /** The last time that the doorbell was rung. */
+ u64 last_db_time;
+
+ /** The doorbell timeout. If the doorbell was not rung for this time and
+ * fill_cnt is non-zero, ring the doorbell again.
+ */
+ u32 db_timeout;
+
+ /** Statistics for this input queue. */
+ struct oct_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ u64 base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+};
+
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+/** 32-byte instruction format.
+ * Format of instruction for a 32-byte mode input queue.
+ */
+struct octeon_instr_32B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /** Input Request Header. Additional info about the input. */
+ u64 irh;
+
+};
+
+#define OCT_32B_INSTR_SIZE (sizeof(struct octeon_instr_32B))
+
+/** 64-byte instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ */
+struct octeon_instr_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Input Request Header. */
+ u64 irh;
+
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ u64 reserved;
+
+};
+
+#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+
+/** The size of each buffer in the soft command buffer pool.
+ */
+#define SOFT_COMMAND_BUFFER_SIZE 1024
+
+struct octeon_soft_command {
+ /** Soft command buffer info. */
+ struct list_head node;
+ u64 dma_addr;
+ u32 size;
+
+ /** Command and return status */
+ struct octeon_instr_64B cmd;
+#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ u64 *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ u64 dmadptr;
+ u32 datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ u64 dmarptr;
+ u32 rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ u32 ctxsize;
+
+ /** Time out and callback */
+ size_t wait_time;
+ size_t timeout;
+ u32 iq_no;
+ void (*callback)(struct octeon_device *, u32, void *);
+ void *callback_arg;
+};
+
+/** Maximum number of buffers to allocate into the soft command buffer pool.
+ */
+#define MAX_SOFT_COMMAND_BUFFERS 16
+
+/** Head of a soft command buffer pool.
+ */
+struct octeon_sc_buffer_pool {
+ /** List structure to add/delete pending entries to */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t alloc_buf_count;
+};
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_buffer_pool(struct octeon_device *oct);
+struct octeon_soft_command *
+ octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize, u32 rdatasize,
+ u32 ctxsize);
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+/**
+ * octeon_init_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be initialized (0 <= iq_no <= 3).
+ * @param num_descs - number of descriptors in the queue.
+ *
+ * Called at driver init time for each input queue. The queue's
+ * configuration parameters are taken from the per-chip configuration.
+ *
+ * @return Success: 0 Failure: 1
+ */
+int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+ u32 num_descs);
+
+/**
+ * octeon_delete_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be deleted (0 <= iq_no <= 3).
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ *
+ * @return Success: 0 Failure: 1
+ */
+int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct);
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *));
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq);
+
+int octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype);
+
+void octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode, u8 subcode,
+ u32 irh_ossp, u64 ossp0,
+ u64 ossp1);
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
+ u32 num_descs, void *app_ctx);
+
+#endif /* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
new file mode 100644
index 000000000000..cbd081981180
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -0,0 +1,237 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_main.h
+ * \brief Host Driver: This file is included by all host driver source files
+ * to include common definitions.
+ */
+
+#ifndef _OCTEON_MAIN_H_
+#define _OCTEON_MAIN_H_
+
+#if BITS_PER_LONG == 32
+#define CVM_CAST64(v) ((long long)(v))
+#elif BITS_PER_LONG == 64
+#define CVM_CAST64(v) ((long long)(long)(v))
+#else
+#error "Unknown system architecture"
+#endif
+
+#define DRV_NAME "LiquidIO"
+
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+int octeon_console_debug_enabled(u32 console);
+
+/* BQL-related functions */
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl);
+
+/** Swap 8B blocks */
+static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
+{
+ while (blocks) {
+ cpu_to_be64s(data);
+ blocks--;
+ data++;
+ }
+}
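+
+/* For example, octeon_swap_8B_data(buf, len >> 3) converts "len" bytes
+ * of 8-byte words in buf to big-endian in place (a no-op on big-endian
+ * hosts).
+ */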
+
+/**
+ * \brief unmaps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ */
+static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
+{
+ dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
+ baridx);
+
+ if (oct->mmio[baridx].done)
+ iounmap(oct->mmio[baridx].hw_addr);
+
+ if (oct->mmio[baridx].start)
+ pci_release_region(oct->pci_dev, baridx * 2);
+}
+
+/**
+ * \brief maps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ * @param max_map_len maximum length of mapped memory
+ */
+static inline int octeon_map_pci_barx(struct octeon_device *oct,
+ int baridx, int max_map_len)
+{
+ u32 mapped_len = 0;
+
+ if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
+ dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
+ baridx);
+ return 1;
+ }
+
+ oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
+ oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);
+
+ mapped_len = oct->mmio[baridx].len;
+ if (!mapped_len)
+ return 1;
+
+ if (max_map_len && (mapped_len > max_map_len))
+ mapped_len = max_map_len;
+
+ oct->mmio[baridx].hw_addr =
+ ioremap(oct->mmio[baridx].start, mapped_len);
+ oct->mmio[baridx].mapped_len = mapped_len;
+
+ dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
+ baridx, oct->mmio[baridx].start, mapped_len,
+ oct->mmio[baridx].len);
+
+ if (!oct->mmio[baridx].hw_addr) {
+ dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
+ baridx);
+ return 1;
+ }
+ oct->mmio[baridx].done = 1;
+
+ return 0;
+}
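+
+/* Mapping sketch (illustrative): probe code typically maps a BAR in
+ * full and unmaps it on teardown.
+ *
+ *	if (octeon_map_pci_barx(oct, 0, 0))
+ *		return 1;
+ *	...
+ *	octeon_unmap_pci_barx(oct, 0);
+ */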
+
+static inline void *
+cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
+ u32 size,
+ u32 *alloc_size,
+ size_t *orig_ptr,
+ size_t *dma_addr __attribute__((unused)))
+{
+ int retries = 0;
+ void *ptr = NULL;
+
+#define OCTEON_MAX_ALLOC_RETRIES 1
+ do {
+ ptr =
+ (void *)__get_free_pages(GFP_KERNEL,
+ get_order(size));
+ if ((unsigned long)ptr & 0x07) {
+ free_pages((unsigned long)ptr, get_order(size));
+ ptr = NULL;
+ /* Increment the size required if the first
+ * attempt failed.
+ */
+ if (!retries)
+ size += 7;
+ }
+ retries++;
+ } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
+
+ *alloc_size = size;
+ *orig_ptr = (unsigned long)ptr;
+ if ((unsigned long)ptr & 0x07)
+ ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
+ return ptr;
+}
+
+#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
+ free_pages(orig_ptr, get_order(size))
+
+static inline void
+sleep_cond(wait_queue_head_t *wait_queue, int *condition)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(wait_queue, &we);
+ while (!(ACCESS_ONCE(*condition))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ goto out;
+ schedule();
+ }
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(wait_queue, &we);
+}
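+
+/* The wake side is expected to set *condition and then call a wake_up
+ * variant (e.g. wake_up_interruptible()) on the same wait queue; the
+ * sleeper above also returns early if a signal is pending.
+ */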
+
+static inline void
+sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(waitq, &we);
+ while (!atomic_read(pcond)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ goto out;
+ schedule();
+ }
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(waitq, &we);
+}
+
+/* Gives up the CPU for a timeout period.
+ * Sleeps for up to "timeout" jiffies unless the condition is already
+ * true when checked.
+ */
+static inline void
+sleep_timeout_cond(wait_queue_head_t *wait_queue,
+ int *condition,
+ int timeout)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(wait_queue, &we);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!(*condition))
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(wait_queue, &we);
+}
+
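+/* Note: these round-up helpers mask with 32-bit constants and so
+ * assume 32-bit operands; e.g. ROUNDUP8(13) == 16, but the upper bits
+ * of a 64-bit value would be cleared.
+ */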
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#ifndef ROUNDUP128
+#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
+#endif
+
+#endif /* _OCTEON_MAIN_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
new file mode 100644
index 000000000000..5aecef870377
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -0,0 +1,199 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+
+static inline void
+octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
+ u32 idx __attribute__((unused)))
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 mask;
+
+ mask = oct->fn_list.bar1_idx_read(oct, idx);
+ mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
+ oct->fn_list.bar1_idx_write(oct, idx, mask);
+#endif
+}
+
+static void
+octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ writeb(*(hostbuf++), mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ writeq(*((u64 *)hostbuf), mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ writeb(*(hostbuf++), mapped_addr++);
+}
+
+static void
+octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ *(hostbuf++) = readb(mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ *((u64 *)hostbuf) = readq(mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ *(hostbuf++) = readb(mapped_addr++);
+}
+
+/* Core mem read/write with temporary bar1 settings. */
+/* op = 1 to read, op = 0 to write. */
+static void
+__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
+ u8 *hostbuf, u32 len, u32 op)
+{
+ u32 copy_len = 0, index_reg_val = 0;
+ unsigned long flags;
+ u8 __iomem *mapped_addr;
+
+ spin_lock_irqsave(&oct->mem_access_lock, flags);
+
+ /* Save the original index reg value. */
+ index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
+ do {
+ oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
+ mapped_addr = oct->mmio[1].hw_addr
+ + (MEMOPS_IDX << 22) + (addr & 0x3fffff);
+
+ /* If the operation crosses a 4MB boundary, split the
+ * transfer at the 4MB boundary.
+ */
+ if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
+ copy_len = (u32)(((addr & ~(0x3fffff)) +
+ (MEMOPS_IDX << 22)) - addr);
+ } else {
+ copy_len = len;
+ }
+
+ if (op) { /* read from core */
+ octeon_pci_fastread(oct, mapped_addr, hostbuf,
+ copy_len);
+ } else {
+ octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
+ copy_len);
+ }
+
+ len -= copy_len;
+ addr += copy_len;
+ hostbuf += copy_len;
+
+ } while (len);
+
+ oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);
+
+ spin_unlock_irqrestore(&oct->mem_access_lock, flags);
+}
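+
+/* For example, with the split above, a 64-byte access that starts 16
+ * bytes below a 4MB boundary is performed as two passes: 16 bytes
+ * through the current BAR1 window, then the remaining 48 bytes after
+ * the window is re-pointed at the next 4MB-aligned core address.
+ */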
+
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
+}
+
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
+}
+
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
+{
+ __be64 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
+
+ return be64_to_cpu(ret);
+}
+
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
+{
+ __be32 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
+
+ return be32_to_cpu(ret);
+}
+
+void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
+ u32 val)
+{
+ __be32 t = cpu_to_be32(val);
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
+}
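+
+/* Access sketch (illustrative): a read-modify-write of a 32-bit word
+ * in core memory using the helpers above.
+ *
+ *	u32 v = octeon_read_device_mem32(oct, coreaddr);
+ *	octeon_write_device_mem32(oct, coreaddr, v | 0x1);
+ */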
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
new file mode 100644
index 000000000000..11b183377b44
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -0,0 +1,75 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_mem_ops.h
+ * \brief Host Driver: Routines used to read/write Octeon memory.
+ */
+
+#ifndef __OCTEON_MEM_OPS_H__
+#define __OCTEON_MEM_OPS_H__
+
+/** Read a 64-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * The BAR1 index register is set up internally for the address range
+ * in which core_addr is mapped.
+ *
+ * @return 64-bit value read from Core memory
+ */
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr);
+
+/** Read a 32-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * @return 32-bit value read from Core memory
+ */
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr);
+
+/** Write a 32-bit value to a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to write to.
+ * @param val - 32-bit value to write.
+ */
+void
+octeon_write_device_mem32(struct octeon_device *oct,
+ u64 core_addr,
+ u32 val);
+
+/** Read multiple bytes from Octeon memory.
+ */
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
+/** Write multiple bytes into Octeon memory.
+ */
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
new file mode 100644
index 000000000000..b3abe5818fd3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -0,0 +1,224 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_network.h
+ * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
+ */
+
+#ifndef __OCTEON_NETWORK_H__
+#define __OCTEON_NETWORK_H__
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/ptp_clock_kernel.h>
+
+/** LiquidIO per-interface network private data */
+struct lio {
+ /** State of the interface. Rx/Tx happens only in the RUNNING state. */
+ atomic_t ifstate;
+
+ /** Octeon Interface index number. This device will be represented as
+ * oct<ifidx> in the system.
+ */
+ int ifidx;
+
+ /** Octeon Input queue to use to transmit for this network interface. */
+ int txq;
+
+ /** Octeon Output queue from which pkts arrive
+ * for this network interface.
+ */
+ int rxq;
+
+ /** Guards the glist */
+ spinlock_t lock;
+
+ /** Linked list of gather components */
+ struct list_head glist;
+
+ /** Pointer to the NIC properties for the Octeon device this network
+ * interface is associated with.
+ */
+ struct octdev_props *octprops;
+
+ /** Pointer to the octeon device structure. */
+ struct octeon_device *oct_dev;
+
+ struct net_device *netdev;
+
+ /** Link information sent by the core application for this interface. */
+ struct oct_link_info linfo;
+
+ /** Size of Tx queue for this octeon device. */
+ u32 tx_qsize;
+
+ /** Size of Rx queue for this octeon device. */
+ u32 rx_qsize;
+
+ /** MTU size for this octeon device. */
+ u32 mtu;
+
+ /** msg level flag per interface. */
+ u32 msg_enable;
+
+ /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
+ u64 dev_capability;
+
+ /** Copy of beacon reg in phy */
+ u32 phy_beacon_val;
+
+ /** Copy of ctrl reg in phy */
+ u32 led_ctrl_val;
+
+ /* PTP clock information */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ s64 ptp_adjust;
+
+ /* for atomic access to Octeon PTP reg and data struct */
+ spinlock_t ptp_lock;
+
+ /* Interface info */
+ u32 intf_open;
+
+ /* work queue for txq status */
+ struct cavium_wq txq_status_wq;
+
+};
+
+#define LIO_SIZE (sizeof(struct lio))
+#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
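+
+/* e.g. in an ndo callback: struct lio *lio = GET_LIO(netdev); */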
+
+/**
+ * \brief Enable or disable feature
+ * @param netdev pointer to network device
+ * @param cmd Command that just requires acknowledgment
+ */
+int liquidio_set_feature(struct net_device *netdev, int cmd);
+
+/**
+ * \brief Link control command completion callback
+ * @param nctrl_ptr pointer to control packet structure
+ *
+ * This routine is called by the callback function when a ctrl pkt sent to
+ * core app completes. The nctrl_ptr contains a copy of the command type
+ * and data sent to the core app. This routine is only called if the ctrl
+ * pkt was sent successfully to the core app.
+ */
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+
+/**
+ * \brief Register ethtool operations
+ * @param netdev pointer to network device
+ */
+void liquidio_set_ethtool_ops(struct net_device *netdev);
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
+ u32 q_no __attribute__((unused)), u32 size)
+{
+#define SKB_ADJ_MASK 0x3F
+#define SKB_ADJ (SKB_ADJ_MASK + 1)
+
+ struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+
+ if (unlikely(!skb)) /* dev_alloc_skb() can fail */
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ return (void *)skb;
+}
+
+static inline void recv_buffer_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+#define lio_dma_alloc(oct, size, dma_addr) \
+ dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+#define lio_dma_free(oct, size, virt_addr, dma_addr) \
+ dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
+
+#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data)
+
+static inline u64
+lio_map_ring_info(struct octeon_droq *droq, u32 i)
+{
+ dma_addr_t dma_addr;
+ struct octeon_device *oct = droq->oct_dev;
+
+ dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
+ OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
+
+ BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring_info(struct pci_dev *pci_dev,
+ u64 info_ptr, u32 size)
+{
+ dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+}
+
+static inline u64
+lio_map_ring(struct pci_dev *pci_dev,
+ void *buf, u32 size)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
+ DMA_FROM_DEVICE);
+
+ BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring(struct pci_dev *pci_dev,
+ u64 buf_ptr, u32 size)
+{
+ dma_unmap_single(&pci_dev->dev,
+ buf_ptr, size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 q_no, u32 size)
+{
+ return recv_buffer_alloc(oct, q_no, size);
+}
+
+static inline void octeon_fast_packet_next(struct octeon_droq *droq,
+ struct sk_buff *nicbuf,
+ int copy_len,
+ int idx)
+{
+ memcpy(skb_put(nicbuf, copy_len),
+ get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
+}
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
new file mode 100644
index 000000000000..1a0191549cb3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -0,0 +1,189 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ struct octeon_instr_64B *cmd,
+ size_t rdatasize)
+{
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, 0, rdatasize, 0);
+
+ if (!sc)
+ return NULL;
+
+ /* Copy existing command structure into the soft command */
+ memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+
+ /* Add in the response related fields. Opcode and Param are already
+ * there.
+ */
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh->rflag = 1; /* a response is required */
+ irh->len = 4; /* means four 64-bit words immediately follow irh */
+
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = rdatasize;
+
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ sc->wait_time = 1000;
+ sc->timeout = jiffies + sc->wait_time;
+
+ return sc;
+}
+
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ u32 xmit_more)
+{
+ int ring_doorbell;
+
+ ring_doorbell = !xmit_more;
+
+ return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
+ ndata->buf, ndata->datasize,
+ ndata->reqtype);
+}
+
+static void octnet_link_ctrl_callback(struct octeon_device *oct,
+ u32 status,
+ void *sc_ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr;
+ struct octnic_ctrl_pkt *nctrl;
+
+ nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
+
+ /* Call the callback function if status is OK.
+ * Status is OK only if a response was expected and core returned
+ * success.
+ * If no response was expected, status is OK if the command was posted
+ * successfully.
+ */
+ if (!status && nctrl->cb_fn)
+ nctrl->cb_fn(nctrl);
+
+ octeon_free_soft_command(oct, sc);
+}
+
+static inline struct octeon_soft_command
+*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams)
+{
+ struct octeon_soft_command *sc = NULL;
+ u8 *data;
+ size_t rdatasize;
+ u32 uddsize = 0, datasize = 0;
+
+ uddsize = (u32)(nctrl->ncmd.s.more * 8);
+
+ datasize = OCTNET_CMD_SIZE + uddsize;
+ rdatasize = (nctrl->wait_time) ? 16 : 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, datasize, rdatasize,
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return NULL;
+
+ memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt));
+
+ data = (u8 *)sc->virtdptr;
+
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+
+ octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
+ }
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
+ 0, 0, 0);
+
+ sc->callback = octnet_link_ctrl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = nctrl->wait_time;
+
+ return sc;
+}
+
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams)
+{
+ int retval;
+ struct octeon_soft_command *sc = NULL;
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
+ __func__);
+ return -1;
+ }
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval) {
+ octeon_free_soft_command(oct, sc);
+ dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
+ __func__, retval);
+ return -1;
+ }
+
+ return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
new file mode 100644
index 000000000000..0238857c8105
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -0,0 +1,227 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_nic.h
+ * \brief Host NIC Driver: Routine to send network data &
+ * control packet to Octeon.
+ */
+
+#ifndef __OCTEON_NIC_H__
+#define __OCTEON_NIC_H__
+
+/* Maximum number of 8-byte words that can be sent in a NIC control message.
+ */
+#define MAX_NCTRL_UDD 32
+
+typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *);
+
+/* Structure of control information passed by the NIC module to the OSI
+ * layer when sending control commands to Octeon device software.
+ */
+struct octnic_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octnet_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ u64 dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ u64 dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ u64 udd[MAX_NCTRL_UDD];
+
+ /** Time to wait for Octeon software to respond to this control command.
+ * If wait_time is 0, OSI assumes no response is expected.
+ */
+ size_t wait_time;
+
+ /** The network device that issued the control command. */
+ u64 netpndev;
+
+ /** Callback function called when the command has been fetched */
+ octnic_ctrl_pkt_cb_fn_t cb_fn;
+};
+
+#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd))
+
+/** Structure of data information passed by the NIC module to the OSI
+ * layer when forwarding data to Octeon device software.
+ */
+struct octnic_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * OSI layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ u32 reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ u32 datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ struct octeon_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ u32 q_no;
+
+};
+
+/** Structure passed by NIC module to OSI layer to prepare a command to send
+ * network data to Octeon.
+ */
+union octnic_cmd_setup {
+ struct {
+ u32 ifidx:8;
+ u32 cksum_offset:7;
+ u32 gather:1;
+ u32 timestamp:1;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ip_csum:1;
+ u32 tnl_csum:1;
+
+ u32 rsvd:11;
+ union {
+ u32 datasize;
+ u32 gatherptrs;
+ } u;
+ } s;
+
+ u64 u64;
+
+};
+
+struct octnic_ctrl_params {
+ u32 resp_order;
+};
+
+static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
+{
+ return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
+ >= (oct->instr_queue[q_no]->max_count - 2));
+}
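+
+/* Backpressure sketch (illustrative): a transmit path checks this
+ * before posting and stops the kernel queue while the IQ is full.
+ *
+ *	if (octnet_iq_is_full(oct, lio->txq))
+ *		netif_stop_queue(netdev);
+ */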
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param tag - tag value for the instruction; 0 selects a default tag
+ * derived from the interface index.
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ union octnic_packet_params packet_params;
+
+ memset(cmd, 0, sizeof(struct octeon_instr_64B));
+
+ ih = (struct octeon_instr_ih *)&cmd->ih;
+
+ /* assume that rflag is cleared, so the front data will only have
+ * irh, ossp[0] and ossp[1] for a total of 24 bytes
+ */
+ ih->fsz = 24;
+
+ ih->tagtype = ORDERED_TAG;
+ ih->grp = DEFAULT_POW_GRP;
+
+ if (tag)
+ ih->tag = tag;
+ else
+ ih->tag = LIO_DATA(setup->s.ifidx);
+
+ ih->raw = 1;
+ ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
+
+ if (!setup->s.gather) {
+ ih->dlengsz = setup->s.u.datasize;
+ } else {
+ ih->gather = 1;
+ ih->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ irh = (struct octeon_instr_irh *)&cmd->irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
+ if (setup->s.cksum_offset) {
+ packet_params.s.csoffset = setup->s.cksum_offset;
+ packet_params.s.ipv4opts_ipv6exthdr =
+ setup->s.ipv4opts_ipv6exthdr;
+ }
+
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.ifidx = setup->s.ifidx;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
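+
+/* Preparation sketch (illustrative; values are placeholders): fill the
+ * setup union, then build the 64B command.
+ *
+ *	union octnic_cmd_setup cmdsetup;
+ *
+ *	memset(&cmdsetup, 0, sizeof(cmdsetup));
+ *	cmdsetup.s.ifidx = ifidx;
+ *	cmdsetup.s.u.datasize = skb->len;
+ *	octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, 0);
+ */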
+
+/** Allocate a soft command with space for a response immediately following
+ * the command.
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the command structure, pre-filled for everything
+ * except the response.
+ * @param rdatasize - size in bytes of the response.
+ *
+ * @returns pointer to allocated buffer with command copied into it, and
+ * response space immediately following.
+ */
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ struct octeon_instr_64B *cmd,
+ size_t rdatasize);
+
+/** Send a NIC data packet to the device
+ * @param oct - octeon device pointer
+ * @param ndata - control structure with queueing and buffer information
+ *
+ * @returns IQ_SEND_FAILED if it failed to add to the input queue,
+ * IQ_SEND_STOP if the queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata, u32 xmit_more);
+
+/** Send a NIC control packet to the device
+ * @param oct - octeon device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ * @param nparams - response control structure
+ *
+ * @returns IQ_SEND_FAILED if it failed to add to the input queue,
+ * IQ_SEND_STOP if the queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
new file mode 100644
index 000000000000..356796bf9b87
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -0,0 +1,766 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
+struct iq_post_status {
+ int status;
+ int index;
+};
+
+static void check_db_timeout(struct work_struct *work);
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+
+static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
+
+static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
+{
+ struct octeon_instr_queue *iq =
+ (struct octeon_instr_queue *)oct->instr_queue[iq_no];
+ return iq->iqcmd_64B;
+}
+
+#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
+
+/* Define this to return the request status compatible with old code */
+/*#define OCTEON_USE_OLD_REQ_STATUS*/
+
+/* Return 0 on success, 1 on failure */
+int octeon_init_instr_queue(struct octeon_device *oct,
+ u32 iq_no, u32 num_descs)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_iq_config *conf = NULL;
+ u32 q_size;
+ struct cavium_wq *db_wq;
+
+ if (OCTEON_CN6XXX(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
+
+ if (!conf) {
+ dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
+ oct->chip_id);
+ return 1;
+ }
+
+ if (num_descs & (num_descs - 1)) {
+ dev_err(&oct->pci_dev->dev,
+ "Number of descriptors for instr queue %d is not a power of 2.\n",
+ iq_no);
+ return 1;
+ }
+
+ q_size = (u32)conf->instr_type * num_descs;
+
+ iq = oct->instr_queue[iq_no];
+
+ iq->base_addr = lio_dma_alloc(oct, q_size,
+ (dma_addr_t *)&iq->base_addr_dma);
+ if (!iq->base_addr) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return 1;
+ }
+
+ iq->max_count = num_descs;
+
+ /* Initialize a list to hold requests that have been posted to Octeon
+ * but have yet to be fetched by Octeon
+ */
+ iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+ if (!iq->request_list) {
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ return 1;
+ }
+
+ memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
+
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
+ iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+
+ iq->iq_no = iq_no;
+ iq->fill_threshold = (u32)conf->db_min;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octeon_read_index = 0;
+ iq->flush_index = 0;
+ iq->last_db_time = 0;
+ iq->do_auto_flush = 1;
+ iq->db_timeout = (u32)conf->db_timeout;
+ atomic_set(&iq->instr_pending, 0);
+
+ /* Initialize the spinlock for this instruction queue */
+ spin_lock_init(&iq->lock);
+
+ oct->io_qmask.iq |= (1 << iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+
+ oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ if (!oct->check_db_wq[iq_no].wq) {
+ vfree(iq->request_list);
+ iq->request_list = NULL;
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
+ iq_no);
+ return 1;
+ }
+
+ db_wq = &oct->check_db_wq[iq_no];
+
+ INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
+ db_wq->wk.ctxptr = oct;
+ db_wq->wk.ctxul = iq_no;
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+
+ return 0;
+}
+
+int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
+{
+ u64 desc_size = 0, q_size;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
+ flush_workqueue(oct->check_db_wq[iq_no].wq);
+ destroy_workqueue(oct->check_db_wq[iq_no].wq);
+
+ if (OCTEON_CN6XXX(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+
+ vfree(iq->request_list);
+
+ if (iq->base_addr) {
+ q_size = iq->max_count * desc_size;
+ lio_dma_free(oct, (u32)q_size, iq->base_addr,
+ iq->base_addr_dma);
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 0 on success, 1 on failure */
+int octeon_setup_iq(struct octeon_device *oct,
+ u32 iq_no,
+ u32 num_descs,
+ void *app_ctx)
+{
+ if (oct->instr_queue[iq_no]) {
+ dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
+ iq_no);
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ return 0;
+ }
+ oct->instr_queue[iq_no] =
+ vzalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[iq_no])
+ return 1;
+
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+ vfree(oct->instr_queue[iq_no]);
+ oct->instr_queue[iq_no] = NULL;
+ return 1;
+ }
+
+ oct->num_iqs++;
+ oct->fn_list.enable_io_queues(oct);
+ return 0;
+}
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct)
+{
+ int i, retry = 1000, pending, instr_cnt = 0;
+
+ do {
+ instr_cnt = 0;
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ pending =
+ atomic_read(&oct->instr_queue[i]->instr_pending);
+ if (pending)
+ __check_db_timeout(oct, i);
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
+static inline void
+ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ mmiowb();
+ iq->fill_cnt = 0;
+ iq->last_db_time = jiffies;
+ }
+}
+
+static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
+ u8 *cmd)
+{
+ u8 *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline int
+__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
+ struct octeon_instr_queue *iq,
+ u32 force_db __attribute__((unused)), u8 *cmd)
+{
+ u32 index = -1;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if the queue gets full before Octeon could fetch any
+ * instruction.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
+ return -1;
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ index = iq->host_write_index;
+ INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return index;
+}
+
+static inline struct iq_post_status
+__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
+ struct octeon_instr_queue *iq,
+ u32 force_db __attribute__((unused)), u8 *cmd)
+{
+ struct iq_post_status st;
+
+ st.status = IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if the queue gets full before Octeon could fetch any
+ * instruction.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
+ st.status = IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
+ st.status = IQ_SEND_STOP;
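+ /* One descriptor is deliberately kept unused (see the check above), so
+ * reaching max_count - 2 means this command takes the last usable slot;
+ * IQ_SEND_STOP tells the caller to stop posting for now.
+ */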
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return st;
+}
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *))
+{
+ if (reqtype > REQTYPE_LAST) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
+ __func__, reqtype);
+ return -EINVAL;
+ }
+
+ reqtype_free_fn[oct->octeon_id][reqtype] = fn;
+
+ return 0;
+}
+
+static inline void
+__add_to_request_list(struct octeon_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq)
+{
+ int reqtype;
+ void *buf;
+ u32 old = iq->flush_index;
+ u32 inst_count = 0;
+ unsigned pkts_compl = 0, bytes_compl = 0;
+ struct octeon_soft_command *sc;
+ struct octeon_instr_irh *irh;
+
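+ /* Entries in [flush_index, octeon_read_index) have already been fetched
+ * by the chip, so their host-side buffers can be completed and freed
+ * below.
+ */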
+ while (old != iq->octeon_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == REQTYPE_NONE)
+ goto skip_this;
+
+ octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
+ &bytes_compl);
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ case REQTYPE_RESP_NET_SG:
+ reqtype_free_fn[oct->octeon_id][reqtype](buf);
+ break;
+ case REQTYPE_RESP_NET:
+ case REQTYPE_SOFT_COMMAND:
+ sc = buf;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ if (irh->rflag) {
+ /* We expect a response from Octeon, so add sc to
+ * the ordered soft command response list;
+ * lio_process_ordered_list() will process it.
+ */
+ spin_lock_bh(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock);
+ atomic_inc(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node, &oct->response_list
+ [OCTEON_ORDERED_SC_LIST].head);
+ spin_unlock_bh(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock);
+ } else {
+ if (sc->callback) {
+ sc->callback(oct, OCTEON_REQUEST_DONE,
+ sc->callback_arg);
+ }
+ }
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev,
+ "%s Unknown reqtype: %d buf: %p at idx %d\n",
+ __func__, reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+ skip_this:
+ inst_count++;
+ INCR_INDEX_BY1(old, iq->max_count);
+ }
+ if (bytes_compl)
+ octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
+ bytes_compl);
+ iq->flush_index = old;
+
+ return inst_count;
+}
+
+static inline void
+update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ u32 inst_processed = 0;
+
+ /* Calculate how many commands Octeon has read and move the read index
+ * accordingly.
+ */
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+
+ /* Move the NORESPONSE requests to the per-device completion list. */
+ if (iq->flush_index != iq->octeon_read_index)
+ inst_processed = lio_process_iq_request_list(oct, iq);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+}
+
+static void
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh)
+{
+ if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
+ spin_lock_bh(&iq->lock);
+ update_iq_indices(oct, iq);
+ spin_unlock_bh(&iq->lock);
+ }
+}
+
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+{
+ struct octeon_instr_queue *iq;
+ u64 next_time;
+
+ if (!oct)
+ return;
+ iq = oct->instr_queue[iq_no];
+ if (!iq)
+ return;
+
+ /* If jiffies - last_db_time < db_timeout do nothing */
+ next_time = iq->last_db_time + iq->db_timeout;
+ if (!time_after(jiffies, (unsigned long)next_time))
+ return;
+ iq->last_db_time = jiffies;
+
+ /* Take the BH-disabling lock: this routine runs from the poll thread,
+ * while instructions may be posted from tasklet context.
+ */
+ spin_lock_bh(&iq->lock);
+ if (iq->fill_cnt != 0)
+ ring_doorbell(oct, iq);
+
+ spin_unlock_bh(&iq->lock);
+
+ /* Flush the instruction queue */
+ if (iq->do_auto_flush)
+ octeon_flush_iq(oct, iq, 1);
+}
+
+/* Called by the Poll thread at regular intervals to check the instruction
+ * queue for commands to be posted and for commands that were fetched by Octeon.
+ */
+static void check_db_timeout(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ unsigned long iq_no = wk->ctxul;
+ struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+
+ __check_db_timeout(oct, iq_no);
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+}
+
+int
+octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype)
+{
+ struct iq_post_status st;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ spin_lock_bh(&iq->lock);
+
+ st = __post_command2(oct, iq, force_db, cmd);
+
+ if (st.status != IQ_SEND_FAILED) {
+ octeon_report_sent_bytes_to_bql(buf, reqtype);
+ __add_to_request_list(iq, st.index, buf, reqtype);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
+
+ if (iq->fill_cnt >= iq->fill_threshold || force_db)
+ ring_doorbell(oct, iq);
+ } else {
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
+ }
+
+ spin_unlock_bh(&iq->lock);
+
+ if (iq->do_auto_flush)
+ octeon_flush_iq(oct, iq, 2);
+
+ return st.status;
+}
+
+void
+octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode,
+ u8 subcode,
+ u32 irh_ossp,
+ u64 ossp0,
+ u64 ossp1)
+{
+ struct octeon_config *oct_cfg;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ BUG_ON(opcode > 15);
+ BUG_ON(subcode > 127);
+
+ oct_cfg = octeon_get_conf(oct);
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ ih->tagtype = ATOMIC_TAG;
+ ih->tag = LIO_CONTROL;
+ ih->raw = 1;
+ ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih->dlengsz = sc->datasize;
+ ih->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.ossp[0] = ossp0;
+ sc->cmd.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ irh->len = 4;
+ ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ } else {
+ irh->rflag = 0;
+ irh->len = 2;
+ ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ }
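+ /* Front-data size arithmetic: irh, ossp[0] and ossp[1] are 8 bytes each
+ * (3 * 8 = 24); the response path adds the 8-byte rdp and 8-byte rptr
+ * (24 + 16 = 40).
+ */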
+
+ while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
+ sc->iq_no++;
+}
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ if (ih->dlengsz) {
+ BUG_ON(!sc->dmadptr);
+ sc->cmd.dptr = sc->dmadptr;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ if (irh->rflag) {
+ BUG_ON(!sc->dmarptr);
+ BUG_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ sc->cmd.rptr = sc->dmarptr;
+ }
+
+ if (sc->wait_time)
+ sc->timeout = jiffies + sc->wait_time;
+
+ return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+}
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
+{
+ int i;
+ u64 dma_addr;
+ struct octeon_soft_command *sc;
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+ spin_lock_init(&oct->sc_buf_pool.lock);
+ atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
+
+ for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
+ sc = (struct octeon_soft_command *)
+ lio_dma_alloc(oct,
+ SOFT_COMMAND_BUFFER_SIZE,
+ (dma_addr_t *)&dma_addr);
+ if (!sc)
+ return 1;
+
+ sc->dma_addr = dma_addr;
+ sc->size = SOFT_COMMAND_BUFFER_SIZE;
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+ }
+
+ return 0;
+}
+
+int octeon_free_sc_buffer_pool(struct octeon_device *oct)
+{
+ struct list_head *tmp, *tmp2;
+ struct octeon_soft_command *sc;
+
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
+ list_del(tmp);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ lio_dma_free(oct, sc->size, sc, sc->dma_addr);
+ }
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+
+ return 0;
+}
+
+struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize,
+ u32 rdatasize,
+ u32 ctxsize)
+{
+ u64 dma_addr;
+ u32 size;
+ u32 offset = sizeof(struct octeon_soft_command);
+ struct octeon_soft_command *sc = NULL;
+ struct list_head *tmp;
+
+ BUG_ON((offset + datasize + rdatasize + ctxsize) >
+ SOFT_COMMAND_BUFFER_SIZE);
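+ /* The command is carved out of a single pool buffer, roughly laid out as
+ *
+ * [struct octeon_soft_command][ctx][pad][data][pad][rdata]
+ *
+ * with the data and rdata regions each aligned to a 128-byte boundary.
+ */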
+
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ if (list_empty(&oct->sc_buf_pool.head)) {
+ spin_unlock(&oct->sc_buf_pool.lock);
+ return NULL;
+ }
+
+ tmp = oct->sc_buf_pool.head.next;
+
+ list_del(tmp);
+
+ atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ dma_addr = sc->dma_addr;
+ size = sc->size;
+
+ memset(sc, 0, sc->size);
+
+ sc->dma_addr = dma_addr;
+ sc->size = size;
+
+ if (ctxsize) {
+ sc->ctxptr = (u8 *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
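+ /* (x + 127) & 0xffffff80 rounds x up to a multiple of 128 (the mask is
+ * ~127 for a u32); e.g. offset + ctxsize = 112 rounds up to 128.
+ */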
+
+ if (datasize) {
+ sc->virtdptr = (u8 *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ BUG_ON(rdatasize < 16);
+ sc->virtrptr = (u8 *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
+ }
+
+ return sc;
+}
+
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+
+ atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
new file mode 100644
index 000000000000..091f537a946e
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -0,0 +1,178 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+static void oct_poll_req_completion(struct work_struct *work);
+
+int octeon_setup_response_list(struct octeon_device *oct)
+{
+ int i, ret = 0;
+ struct cavium_wq *cwq;
+
+ for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
+ INIT_LIST_HEAD(&oct->response_list[i].head);
+ spin_lock_init(&oct->response_list[i].lock);
+ atomic_set(&oct->response_list[i].pending_req_count, 0);
+ }
+
+ oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ if (!oct->dma_comp_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
+ return -ENOMEM;
+ }
+
+ cwq = &oct->dma_comp_wq;
+ INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
+ cwq->wk.ctxptr = oct;
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+
+ return ret;
+}
+
+void octeon_delete_response_list(struct octeon_device *oct)
+{
+ cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
+ flush_workqueue(oct->dma_comp_wq.wq);
+ destroy_workqueue(oct->dma_comp_wq.wq);
+}
+
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit)
+{
+ struct octeon_response_list *ordered_sc_list;
+ struct octeon_soft_command *sc;
+ int request_complete = 0;
+ int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
+ u32 status;
+ u64 status64;
+ struct octeon_instr_rdp *rdp;
+
+ ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
+
+ do {
+ spin_lock_bh(&ordered_sc_list->lock);
+
+ if (list_empty(&ordered_sc_list->head)) {
+ /* ordered_sc_list is empty; there is
+ * nothing to process
+ */
+ spin_unlock_bh(&ordered_sc_list->lock);
+ return 1;
+ }
+
+ sc = (struct octeon_soft_command *)ordered_sc_list->head.next;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ status = OCTEON_REQUEST_PENDING;
+
+ /* Check if Octeon has finished DMA'ing a response to where
+ * rptr is pointing.
+ */
+ dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
+ sc->cmd.rptr, rdp->rlen,
+ DMA_FROM_DEVICE);
+ status64 = *sc->status_word;
+
+ if (status64 != COMPLETION_WORD_INIT) {
+ if ((status64 & 0xff) != 0xff) {
+ octeon_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ status = (u32)(status64 &
+ 0xffffffffULL);
+ }
+ }
+ } else if (force_quit || (sc->timeout &&
+ time_after(jiffies, (unsigned long)sc->timeout))) {
+ status = OCTEON_REQUEST_TIMEOUT;
+ }
+
+ if (status != OCTEON_REQUEST_PENDING) {
+ /* we have received a response or we have timed out */
+ /* remove node from linked list */
+ list_del(&sc->node);
+ atomic_dec(&octeon_dev->response_list
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+ spin_unlock_bh(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(octeon_dev, status,
+ sc->callback_arg);
+
+ request_complete++;
+
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ spin_unlock_bh(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the max number of ordered requests to process per loop,
+ * quit and let this function be invoked the next time the poll
+ * thread runs, to process the remaining requests. Without this
+ * upper limit, the function could take up the entire CPU.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+ lio_process_ordered_list(oct, 0);
+
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 000000000000..7a48752dcb10
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,140 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file response_manager.h
+ * \brief Host Driver: Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum number of ordered requests to process in each invocation of
+ * lio_process_ordered_list(). The function keeps processing requests as
+ * long as it finds completed ones, so without an upper limit it could run
+ * forever. The value defined here caps the number of requests processed
+ * before control is returned to the poll thread.
+ */
+#define MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Head of a response list. There are several response lists in the
+ * system: one for each response order (ordered and unordered), plus one
+ * for the no-response entries on each instruction queue.
+ */
+struct octeon_response_list {
+ /** List structure to add delete pending entries to */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t pending_req_count;
+};
+
+/** The type of response list. */
+enum {
+ OCTEON_ORDERED_LIST = 0,
+ OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+ OCTEON_UNORDERED_BLOCKING_LIST = 2,
+ OCTEON_ORDERED_SC_LIST = 3
+};
+
+/** Response Order values for an Octeon request. */
+enum {
+ OCTEON_RESP_ORDERED = 0,
+ OCTEON_RESP_UNORDERED = 1,
+ OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ *   31            16 15             0
+ *   ---------------------------------
+ *   |  Major Error  |  Minor Error  |
+ *   ---------------------------------
+ *
+ * Error codes are 32-bit wide. The upper 16 bits, called the Major Error
+ * Number, identify the group to which the error code belongs. The lower
+ * 16 bits, called the Minor Error Number, carry the actual code.
+ *
+ * So an error code is (MAJOR_NUMBER << 16) | MINOR_NUMBER.
+ */
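+/* For illustration: a hypothetical major number 0x0002 with minor number
+ * 0x0001 would encode as (0x0002 << 16) | 0x0001 = 0x00020001.
+ */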
+
+/*------------ Error codes used by host driver -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE 0x0000
+
+/** A value of 0x00000000 indicates no error i.e. success */
+#define DRIVER_ERROR_NONE 0x00000000
+
+/** (Major number: 0x0000; Minor Number: 0x0001) */
+#define DRIVER_ERROR_REQ_PENDING 0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT 0x00000003
+#define DRIVER_ERROR_REQ_EINTR 0x00000004
+#define DRIVER_ERROR_REQ_ENXIO 0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM 0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL 0x00000016
+#define DRIVER_ERROR_REQ_FAILED 0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that's described by one of the OCTEON_REQUEST_* values
+ * below. If the request is successfully queued, the driver will return
+ * an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT and
+ * OCTEON_REQUEST_INTERRUPTED are only returned by the driver if the
+ * response for the request failed to arrive before a time-out period or if
+ * the request processing got interrupted due to a signal, respectively.
+ */
+enum {
+ OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+ OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+ OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+ OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+ OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+ OCTEON_REQUEST_NOT_RUNNING,
+ OCTEON_REQUEST_INVALID_IQ,
+ OCTEON_REQUEST_INVALID_BUFCNT,
+ OCTEON_REQUEST_INVALID_RESP_ORDER,
+ OCTEON_REQUEST_NO_MEMORY,
+ OCTEON_REQUEST_INVALID_BUFSIZE,
+ OCTEON_REQUEST_NO_PENDING_ENTRY,
+ OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+
+};
+
+/** Initialize the response lists. One list is created for each of the
+ * MAX_RESPONSE_LISTS response types.
+ * @param octeon_dev - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of first entry in the ordered list. If the instruction at
+ * that entry finished processing or has timed-out, the entry is cleaned.
+ * @param octeon_dev - the octeon device structure.
+ * @param force_quit - the request is forced to timeout if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit);
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b96e4bfcac41..8f7aa53a4c4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1025,19 +1025,19 @@ int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
struct adapter *adapter = phy->adapter;
const struct firmware *fw;
- char buf[64];
+ const char *fw_name;
u32 csum;
const __be32 *p;
u16 *cache = phy->phy_cache;
- int i, ret;
-
- snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));
+ int i, ret = -EINVAL;
- ret = request_firmware(&fw, buf, &adapter->pdev->dev);
+ fw_name = get_edc_fw_name(edc_idx);
+ if (fw_name)
+ ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
if (ret < 0) {
dev_err(&adapter->pdev->dev,
"could not upgrade firmware: unable to load %s\n",
- buf);
+ fw_name);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index bf2b822d0e8e..2e70228272b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,17 +46,19 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
- MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
- ID_LEN = 16, /* ID length */
- PN_LEN = 16, /* Part Number length */
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 24, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
+ MACADDR_LEN = 12, /* MAC Address length */
};
enum {
@@ -280,6 +282,7 @@ struct vpd_params {
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
+ u8 na[MACADDR_LEN + 1];
};
struct pci_params {
@@ -946,6 +949,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
}
/**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
+/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
@@ -1229,7 +1248,7 @@ void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
@@ -1251,7 +1270,8 @@ unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1317,6 +1337,10 @@ int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
@@ -1342,6 +1366,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
@@ -1378,6 +1403,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
@@ -1407,6 +1435,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 3719807efddd..484eb8c37489 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -182,6 +182,95 @@ static const struct file_operations cim_la_fops = {
.release = seq_release_private
};
+static int cim_pif_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Cntl ID DataBE Addr Data\n");
+ } else if (idx < CIM_PIFLA_SIZE) {
+ seq_printf(seq, " %02x %02x %04x %08x %08x%08x%08x%08x\n",
+ (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
+ p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
+ } else {
+ if (idx == CIM_PIFLA_SIZE)
+ seq_puts(seq, "\nCntl ID Data\n");
+ seq_printf(seq, " %02x %02x %08x%08x%08x%08x\n",
+ (p[4] >> 6) & 0xff, p[4] & 0x3f,
+ p[3], p[2], p[1], p[0]);
+ }
+ return 0;
+}
+
+static int cim_pif_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
+ cim_pif_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_cim_read_pif_la(adap, (u32 *)p->data,
+ (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+ return 0;
+}
+
+static const struct file_operations cim_pif_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_pif_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int cim_ma_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "\n");
+ } else if (idx < CIM_MALA_SIZE) {
+ seq_printf(seq, "%02x%08x%08x%08x%08x\n",
+ p[4], p[3], p[2], p[1], p[0]);
+ } else {
+ if (idx == CIM_MALA_SIZE)
+ seq_puts(seq,
+ "\nCnt ID Tag UE Data RDY VLD\n");
+ seq_printf(seq, "%3u %2u %x %u %08x%08x %u %u\n",
+ (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
+ (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
+ (p[1] >> 2) | ((p[2] & 3) << 30),
+ (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
+ p[0] & 1);
+ }
+ return 0;
+}
+
+static int cim_ma_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
+ cim_ma_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_cim_read_ma_la(adap, (u32 *)p->data,
+ (u32 *)p->data + 5 * CIM_MALA_SIZE);
+ return 0;
+}
+
+static const struct file_operations cim_ma_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_ma_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
static int cim_qcfg_show(struct seq_file *seq, void *v)
{
static const char * const qname[] = {
@@ -663,6 +752,39 @@ static const struct file_operations pm_stats_debugfs_fops = {
.write = pm_stats_clear
};
+static int tx_rate_show(struct seq_file *seq, void *v)
+{
+ u64 nrate[NCHAN], orate[NCHAN];
+ struct adapter *adap = seq->private;
+
+ t4_get_chan_txrate(adap, nrate, orate);
+ if (adap->params.arch.nchan == NCHAN) {
+ seq_puts(seq, " channel 0 channel 1 "
+ "channel 2 channel 3\n");
+ seq_printf(seq, "NIC B/s: %10llu %10llu %10llu %10llu\n",
+ (unsigned long long)nrate[0],
+ (unsigned long long)nrate[1],
+ (unsigned long long)nrate[2],
+ (unsigned long long)nrate[3]);
+ seq_printf(seq, "Offload B/s: %10llu %10llu %10llu %10llu\n",
+ (unsigned long long)orate[0],
+ (unsigned long long)orate[1],
+ (unsigned long long)orate[2],
+ (unsigned long long)orate[3]);
+ } else {
+ seq_puts(seq, " channel 0 channel 1\n");
+ seq_printf(seq, "NIC B/s: %10llu %10llu\n",
+ (unsigned long long)nrate[0],
+ (unsigned long long)nrate[1]);
+ seq_printf(seq, "Offload B/s: %10llu %10llu\n",
+ (unsigned long long)orate[0],
+ (unsigned long long)orate[1]);
+ }
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+
static int cctrl_tbl_show(struct seq_file *seq, void *v)
{
static const char * const dec_fac[] = {
@@ -2128,6 +2250,8 @@ int t4_setup_debugfs(struct adapter *adap)
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, S_IRUSR, 0 },
+ { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 },
+ { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 },
{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
{ "devlog", &devlog_fops, S_IRUSR, 0 },
@@ -2163,6 +2287,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
{ "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
{ "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+ { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 },
{ "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 0194c91a0486..687acf71fa15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -646,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -679,7 +679,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 305715484004..0e27f2266e6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -481,7 +481,7 @@ static int link_start(struct net_device *dev)
}
}
if (ret == 0)
- ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
local_bh_disable();
@@ -2007,11 +2007,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- int ret;
- ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
- return ret;
+ return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -3715,7 +3712,7 @@ static int adap_init0(struct adapter *adap)
* the firmware. On the other hand, we need these fairly early on
* so we do this right after getting ahold of the firmware.
*/
- ret = get_vpd_params(adap, &adap->params.vpd);
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;
@@ -4071,8 +4068,8 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- t4_init_tp_params(adap);
adap->flags |= FW_OK;
+ t4_init_tp_params(adap);
return 0;
/*
@@ -4538,6 +4535,11 @@ static void free_some_resources(struct adapter *adapter)
for_each_port(adapter, i)
if (adapter->port[i]) {
+ struct port_info *pi = adap2pinfo(adapter, i);
+
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
@@ -4733,10 +4735,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
+ } else if (adapter->params.nports == 1) {
+ /* If we don't have a connection to the firmware -- possibly
+ * because of an error -- grab the raw VPD parameters so we
+ * can set the proper MAC Address on the debug network
+ * interface that we've created.
+ */
+ u8 hw_addr[ETH_ALEN];
+ u8 *na = adapter->params.vpd.na;
+
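+ /* The VPD "NA" keyword holds the MAC as 12 hex characters;
+ * e.g. a hypothetical "0007430A1B2C" would parse to
+ * 00:07:43:0a:1b:2c via the hex2val() loop below.
+ */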
+ err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+ if (!err) {
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+ t4_set_hw_addr(adapter, 0, hw_addr);
+ }
}
- /*
- * Configure queues and allocate tables now, they can be needed as
+ /* Configure queues and allocate tables now; they can be needed as
* soon as the first register_netdev completes.
*/
cfg_queues(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c21ab2686a69..9d93f4c27300 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1729,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
}
/**
- * get_vpd_params - read VPD parameters from VPD EEPROM
+ * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
* @p: where to store the parameters
*
* Reads card parameters stored in VPD EEPROM.
*/
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- u32 cclk_param, cclk_val;
- int i, ret, addr;
- int ec, sn, pn;
+ int i, ret = 0, addr;
+ int ec, sn, pn, na;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -1747,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
@@ -1812,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
FIND_VPD_KW(pn, "PN");
+ FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -1824,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
+ i = pci_vpd_info_field_size(vpd + na - PCI_VPD_INFO_FLD_HDR_SIZE);
+ memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ strim((char *)p->na);
- /*
- * Ask firmware for the Core Clock since it knows how to translate the
+out:
+ vfree(vpd);
+ return ret;
+}
+
+/**
+ * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM and retrieves the Core
+ * Clock. This can only be called after a connection to the firmware
+ * is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /* Grab the raw VPD parameters.
+ */
+ ret = t4_get_raw_vpd_params(adapter, p);
+ if (ret)
+ return ret;
+
+ /* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
- ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1, &cclk_param, &cclk_val);
-out:
- vfree(vpd);
if (ret)
return ret;
p->cclk = cclk_val;
@@ -2559,6 +2586,61 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr)
+{
+ int i, j;
+ u32 cfg, val, req, rsp;
+
+ cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+ if (cfg & LADBGEN_F)
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+ val = t4_read_reg(adap, CIM_DEBUGSTS_A);
+ req = POLADBGWRPTR_G(val);
+ rsp = PILADBGWRPTR_G(val);
+ if (pif_req_wrptr)
+ *pif_req_wrptr = req;
+ if (pif_rsp_wrptr)
+ *pif_rsp_wrptr = rsp;
+
+ for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+ for (j = 0; j < 6; j++) {
+ t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
+ PILADBGRDPTR_V(rsp));
+ *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
+ *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
+ req++;
+ rsp++;
+ }
+ req = (req + 2) & POLADBGRDPTR_M;
+ rsp = (rsp + 2) & PILADBGRDPTR_M;
+ }
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+ u32 cfg;
+ int i, j, idx;
+
+ cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+ if (cfg & LADBGEN_F)
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+ for (i = 0; i < CIM_MALA_SIZE; i++) {
+ for (j = 0; j < 5; j++) {
+ idx = 8 * i + j;
+ t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
+ PILADBGRDPTR_V(idx));
+ *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
+ *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
+ }
+ }
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
unsigned int i, j;
@@ -2579,7 +2661,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
FW_PORT_CAP_ANEG)
/**
- * t4_link_start - apply link configuration to MAC/PHY
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
* @phy: the PHY to setup
* @mac: the MAC to setup
* @lc: the requested link configuration
@@ -2591,7 +2673,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
@@ -3606,6 +3688,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int ret, i;
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ (rw ? FW_CMD_READ_F :
+ FW_CMD_WRITE_F) |
+ FW_LDST_CMD_ADDRSPACE_V(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (!ret && rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+}
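+/* Typical use, as in t4_read_rss_key() below: rw = 1 reads and rw = 0
+ * writes; e.g. t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1)
+ * reads the ten RSS secret-key registers through the firmware.
+ */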
+
+/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
@@ -3614,8 +3730,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+ else
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
}
/**
@@ -3641,8 +3760,11 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+ else
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
if (idx >= 0 && idx < rss_key_addr_cnt) {
if (rss_key_addr_cnt > 16)
@@ -3667,8 +3789,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- valp, 1, TP_RSS_PF0_CONFIG_A + index);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, valp, 1,
+ TP_RSS_PF0_CONFIG_A + index, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ valp, 1, TP_RSS_PF0_CONFIG_A + index);
}
/**
@@ -3703,10 +3829,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfl, 1, TP_RSS_VFL_CONFIG_A);
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfh, 1, TP_RSS_VFH_CONFIG_A);
+ if (adapter->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+ t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfl, 1, TP_RSS_VFL_CONFIG_A);
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfh, 1, TP_RSS_VFH_CONFIG_A);
+ }
}
/**
@@ -3719,8 +3850,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmap, 1, TP_RSS_PF_MAP_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmap, 1, TP_RSS_PF_MAP_A);
return pfmap;
}
@@ -3734,8 +3868,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmask, 1, TP_RSS_PF_MSK_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmask, 1, TP_RSS_PF_MSK_A);
return pfmask;
}
@@ -4054,6 +4191,52 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
}
}
+/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks. The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which, with the core clock cclk expressed in kHz, is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * cclk
+ */
+static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+ u64 v = bytes256 * adap->params.vpd.cclk;
+
+ return v * 62 + v / 2;
+}
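+/* Worked example: with the default 50 MHz core clock (cclk = 50000 kHz)
+ * and bytes256 = 1, v = 50000 and v * 62 + v / 2 = 3,125,000 bytes/s,
+ * which matches 1 * 256 * 50000000 / 4096.
+ */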
+
+/**
+ * t4_get_chan_txrate - get the current per channel Tx rates
+ * @adap: the adapter
+ * @nic_rate: rates for NIC traffic
+ * @ofld_rate: rates for offloaded traffic
+ *
+ * Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ * for each channel.
+ */
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TX_TRATE_A);
+ nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
+ nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
+ if (adap->params.arch.nchan == NCHAN) {
+ nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
+ nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
+ }
+
+ v = t4_read_reg(adap, TP_TX_ORATE_A);
+ ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
+ ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
+ if (adap->params.arch.nchan == NCHAN) {
+ ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
+ ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
+ }
+}
+
/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
@@ -4530,6 +4713,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
}
/**
+ * t4_sge_ctxt_flush - flush the SGE context cache
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a FW command through the given mailbox to flush the
+ * SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+ int ret;
+ u32 ldst_addrspace;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ return ret;
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -5226,6 +5435,33 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) |
+ FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+ c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
* t4_set_rxmode - set Rx properties of a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -5767,6 +6003,22 @@ static int get_flash_params(struct adapter *adap)
return 0;
}
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+ u16 val;
+ u32 pcie_cap;
+
+ pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+ val |= range;
+ pci_write_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -5839,6 +6091,9 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.nports = 1;
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
+
+ /* Set the PCIe completion timeout to range 0xd (4 s to 13 s). */
+ set_pcie_completion_timeout(adapter, 0xd);
return 0;
}
@@ -5899,7 +6154,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
- bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
@@ -6038,12 +6293,19 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A);
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG_A);
+ if (adap->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, 1);
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A);
+ }
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index f9a2cb164737..c8488f430d19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -59,6 +59,8 @@ enum {
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+ CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
+ CIM_MALA_SIZE = 64, /* # of 160-bit words in CIM MA LA */
CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */
TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 1a9a6f334d2d..d7ca106927b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index af3462db5adb..375a825573b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1361,6 +1361,42 @@
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
+#define TP_TX_ORATE_A 0x7ebc
+
+#define OFDRATE3_S 24
+#define OFDRATE3_M 0xffU
+#define OFDRATE3_G(x) (((x) >> OFDRATE3_S) & OFDRATE3_M)
+
+#define OFDRATE2_S 16
+#define OFDRATE2_M 0xffU
+#define OFDRATE2_G(x) (((x) >> OFDRATE2_S) & OFDRATE2_M)
+
+#define OFDRATE1_S 8
+#define OFDRATE1_M 0xffU
+#define OFDRATE1_G(x) (((x) >> OFDRATE1_S) & OFDRATE1_M)
+
+#define OFDRATE0_S 0
+#define OFDRATE0_M 0xffU
+#define OFDRATE0_G(x) (((x) >> OFDRATE0_S) & OFDRATE0_M)
+
+#define TP_TX_TRATE_A 0x7ed0
+
+#define TNLRATE3_S 24
+#define TNLRATE3_M 0xffU
+#define TNLRATE3_G(x) (((x) >> TNLRATE3_S) & TNLRATE3_M)
+
+#define TNLRATE2_S 16
+#define TNLRATE2_M 0xffU
+#define TNLRATE2_G(x) (((x) >> TNLRATE2_S) & TNLRATE2_M)
+
+#define TNLRATE1_S 8
+#define TNLRATE1_M 0xffU
+#define TNLRATE1_G(x) (((x) >> TNLRATE1_S) & TNLRATE1_M)
+
+#define TNLRATE0_S 0
+#define TNLRATE0_M 0xffU
+#define TNLRATE0_G(x) (((x) >> TNLRATE0_S) & TNLRATE0_M)
+
#define TP_VLAN_PRI_MAP_A 0x140
#define FRAGMENTATION_S 9
@@ -2759,6 +2795,33 @@
#define CIM_IBQ_DBG_DATA_A 0x7b68
#define CIM_OBQ_DBG_DATA_A 0x7b6c
+#define CIM_DEBUGCFG_A 0x7b70
+#define CIM_DEBUGSTS_A 0x7b74
+
+#define POLADBGRDPTR_S 23
+#define POLADBGRDPTR_M 0x1ffU
+#define POLADBGRDPTR_V(x) ((x) << POLADBGRDPTR_S)
+
+#define POLADBGWRPTR_S 16
+#define POLADBGWRPTR_M 0x1ffU
+#define POLADBGWRPTR_G(x) (((x) >> POLADBGWRPTR_S) & POLADBGWRPTR_M)
+
+#define PILADBGRDPTR_S 14
+#define PILADBGRDPTR_M 0x1ffU
+#define PILADBGRDPTR_V(x) ((x) << PILADBGRDPTR_S)
+
+#define PILADBGWRPTR_S 0
+#define PILADBGWRPTR_M 0x1ffU
+#define PILADBGWRPTR_G(x) (((x) >> PILADBGWRPTR_S) & PILADBGWRPTR_M)
+
+#define LADBGEN_S 12
+#define LADBGEN_V(x) ((x) << LADBGEN_S)
+#define LADBGEN_F LADBGEN_V(1U)
+
+#define CIM_PO_LA_DEBUGDATA_A 0x7b78
+#define CIM_PI_LA_DEBUGDATA_A 0x7b7c
+#define CIM_PO_LA_MADEBUGDATA_A 0x7b80
+#define CIM_PI_LA_MADEBUGDATA_A 0x7b84
#define UPDBGLARDEN_S 1
#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
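
The new registers follow the file's uniform bitfield-macro scheme: _S is a field's bit offset, _M its width mask, _V(x) packs a value into position, and _G(x) extracts one. A self-contained illustration of the scheme with one hypothetical 9-bit field:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 9-bit field at bit 16, mirroring the _S/_M/_V/_G naming. */
#define DEMO_FIELD_S	16
#define DEMO_FIELD_M	0x1ffU
#define DEMO_FIELD_V(x)	((x) << DEMO_FIELD_S)
#define DEMO_FIELD_G(x)	(((x) >> DEMO_FIELD_S) & DEMO_FIELD_M)

int main(void)
{
	uint32_t reg = 0;

	reg |= DEMO_FIELD_V(0x12aU);		/* pack the field */
	printf("reg=0x%08x field=0x%x\n",
	       reg, DEMO_FIELD_G(reg));		/* extracts 0x12a */
	return 0;
}
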
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index aceb1e8cacc8..ab4674684acc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -772,7 +772,7 @@ struct fw_ldst_cmd {
} addrval;
struct fw_ldst_idctxt {
__be32 physid;
- __be32 msg_pkd;
+ __be32 msg_ctxtflush;
__be32 ctxt_data7;
__be32 ctxt_data6;
__be32 ctxt_data5;
@@ -834,6 +834,10 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
+#define FW_LDST_CMD_CTXTFLUSH_S 30
+#define FW_LDST_CMD_CTXTFLUSH_V(x) ((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F FW_LDST_CMD_CTXTFLUSH_V(1U)
+
#define FW_LDST_CMD_PADDR_S 8
#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index a31b57adcb37..d106186f4f4a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -33,8 +33,8 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
return -EPROTONOSUPPORT;
};
data.type = FILTER_IPV4_5TUPLE;
- data.u.ipv4.src_addr = ntohl(keys->addrs.src);
- data.u.ipv4.dst_addr = ntohl(keys->addrs.dst);
+ data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
+ data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
data.u.ipv4.src_port = ntohs(keys->ports.src);
data.u.ipv4.dst_port = ntohs(keys->ports.dst);
data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
@@ -158,8 +158,8 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
struct enic_rfs_fltr_node *tpos;
hlist_for_each_entry(tpos, h, node)
- if (tpos->keys.addrs.src == k->addrs.src &&
- tpos->keys.addrs.dst == k->addrs.dst &&
+ if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
+ tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
tpos->keys.ports.ports == k->ports.ports &&
tpos->keys.basic.ip_proto == k->basic.ip_proto &&
tpos->keys.basic.n_proto == k->basic.n_proto)
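
Both enic hunks track a flow-dissector layout change: the IPv4 addresses moved from keys->addrs.src/dst into a nested v4addrs member, so the same union can also carry IPv6 addresses. A simplified mock-up of that nesting (not the kernel's struct flow_keys):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct v4_addrs { uint32_t src, dst; };
struct v6_addrs { uint8_t src[16], dst[16]; };

/* One union member per address family; callers pick the view that
 * matches basic.n_proto, as the comparison hunk above does for IPv4.
 */
struct flow_addrs {
	union {
		struct v4_addrs v4addrs;
		struct v6_addrs v6addrs;
	};
};

int main(void)
{
	struct flow_addrs a;

	memset(&a, 0, sizeof(a));
	a.v4addrs.src = 0xc0a80001;	/* 192.168.0.1 */
	a.v4addrs.dst = 0xc0a80002;	/* 192.168.0.2 */
	printf("src=0x%08x dst=0x%08x\n", a.v4addrs.src, a.v4addrs.dst);
	return 0;
}
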
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 117c0968dd0b..f3f1601a76f3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_devcmd_fw_info *fw_info;
+ int err;
- enic_dev_fw_info(enic, &fw_info);
+ err = enic_dev_fw_info(enic, &fw_info);
+ /* Return only when pci_zalloc_consistent fails in vnic_dev_fw_info.
+ * For other failures, like a devcmd failure, we return the previously
+ * recorded info.
+ */
+ if (err == -ENOMEM)
+ return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
+ int err;
- enic_dev_stats_dump(enic, &vstats);
+ err = enic_dev_stats_dump(enic, &vstats);
+ /* Return only when pci_zalloc_consistent fails in vnic_dev_stats_dump.
+ * For other failures, like a devcmd failure, we return the previously
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+ return;
for (i = 0; i < enic_n_tx_stats; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
@@ -346,10 +360,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
break;
}
- fsp->h_u.tcp_ip4_spec.ip4src = n->keys.addrs.src;
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.addrs.dst;
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 204bd182473b..eadae1b412c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ int err;
- enic_dev_stats_dump(enic, &stats);
+ err = enic_dev_stats_dump(enic, &stats);
+ /* Return only when pci_zalloc_consistent fails in vnic_dev_stats_dump.
+ * For other failures, like a devcmd failure, we return the previously
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+ return net_stats;
net_stats->tx_packets = stats->tx.tx_frames_ok;
net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_poll_unlock_napi(&enic->rq[rq]);
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
- enic_poll_unlock_napi(&enic->rq[rq]);
return work_done;
}
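
Hoisting enic_poll_unlock_napi() above the unmask path enforces a simple ordering rule: release the per-ring busy-poll lock before re-enabling the interrupt source, so an interrupt that fires immediately after the unmask never finds the ring still marked as owned by a poller. A userspace sketch of the rule (a mutex stands in for the busy-poll lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;

static void unmask_irq(void) { puts("irq unmasked"); }

/* Drop poll ownership first, then allow new interrupts; reversing the
 * two steps opens the window the hunk above closes.
 */
static void finish_poll(void)
{
	pthread_mutex_unlock(&poll_lock);	/* 1: release the ring */
	unmask_irq();				/* 2: re-enable the source */
}

int main(void)
{
	pthread_mutex_lock(&poll_lock);		/* polling in progress */
	finish_poll();
	return 0;
}
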
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 36a2ed606c91..c4b2183bf352 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
struct vnic_rq_buf *buf;
u32 fetch_index;
unsigned int count = rq->ring.desc_count;
+ int i;
buf = rq->to_clean;
- while (vnic_rq_desc_used(rq) > 0) {
-
+ for (i = 0; i < rq->ring.desc_count; i++) {
(*buf_clean)(rq, buf);
-
- buf = rq->to_clean = buf->next;
- rq->ring.desc_avail++;
+ buf = buf->next;
}
+ rq->ring.desc_avail = rq->ring.desc_count - 1;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
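
The rewritten clean loop visits all desc_count descriptors by counting instead of trusting vnic_rq_desc_used(), then recomputes desc_avail in one assignment, which stays correct even if the used/available bookkeeping had drifted. The same walk, reduced to a plain circular ring:

#include <stdio.h>

#define RING_SIZE 8

struct buf {
	int id;
	struct buf *next;	/* circular singly linked ring */
};

/* Clean every descriptor exactly once by counting, rather than looping
 * on a possibly stale "used" counter; availability is reset afterwards
 * in a single step (desc_count - 1, as in the hunk above).
 */
static void ring_clean(struct buf *start, void (*clean)(struct buf *))
{
	struct buf *b = start;
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		clean(b);
		b = b->next;
	}
}

static void show(struct buf *b) { printf("clean %d\n", b->id); }

int main(void)
{
	struct buf ring[RING_SIZE];
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i].id = i;
		ring[i].next = &ring[(i + 1) % RING_SIZE];
	}
	ring_clean(&ring[0], show);
	return 0;
}
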
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 41150543906a..9eac3227d2ca 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1742,9 +1742,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
- get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure while reading FAT data\n");
@@ -1789,8 +1789,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
- pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va, get_fat_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
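
The benet conversions in this file all follow one pattern: pci_alloc_consistent(), which always allocated with GFP_ATOMIC, becomes dma_zalloc_coherent() with an explicit GFP_ATOMIC, preserving behavior for call sites that run under the adapter's mcc_lock spinlock; and because dma_zalloc_coherent() zeroes the buffer, the explicit memset() after allocation can be dropped (as in the transceiver-data hunk below). A minimal kernel-style sketch of the converted shape (alloc_cmd_buf is a hypothetical wrapper, not a driver function):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* dma_zalloc_coherent() both allocates and zeroes; GFP_ATOMIC is kept
 * because several callers hold a spinlock, where sleeping allocations
 * are not allowed.
 */
static void *alloc_cmd_buf(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, size, dma, GFP_ATOMIC);
}
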
@@ -2237,12 +2237,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -EINVAL;
cmd.size = sizeof(struct be_cmd_resp_port_type);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
}
- memset(cmd.va, 0, cmd.size);
spin_lock_bh(&adapter->mcc_lock);
@@ -2267,7 +2267,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
}
err:
spin_unlock_bh(&adapter->mcc_lock);
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2742,7 +2742,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2776,7 +2777,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2827,8 +2828,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
- attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ attribs_cmd.size,
+ &attribs_cmd.dma, GFP_ATOMIC);
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -2855,8 +2857,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
err:
mutex_unlock(&adapter->mbox_lock);
if (attribs_cmd.va)
- pci_free_consistent(adapter->pdev, attribs_cmd.size,
- attribs_cmd.va, attribs_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+ attribs_cmd.va, attribs_cmd.dma);
return status;
}
@@ -2994,9 +2996,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
- get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma,
+ GFP_ATOMIC);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -3069,8 +3072,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
out:
spin_unlock_bh(&adapter->mcc_lock);
- pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
@@ -3123,8 +3126,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -3325,7 +3328,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -3360,7 +3364,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
err:
mutex_unlock(&adapter->mbox_lock);
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
@@ -3374,8 +3379,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
+ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va)
return -ENOMEM;
@@ -3397,8 +3403,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err:
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
return status;
}
@@ -3411,8 +3417,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
+ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va) {
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3430,8 +3437,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
level = cfgs->module[0].trace_lvl[j].dbg_lvl;
}
}
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
err:
return level;
}
@@ -3629,7 +3636,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3669,7 +3677,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
err:
mutex_unlock(&adapter->mbox_lock);
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
@@ -3690,7 +3699,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
@@ -3736,7 +3746,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
res->vf_if_cap_flags = vf_res->cap_flags;
err:
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
@@ -3751,7 +3762,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
@@ -3767,7 +3779,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
status = be_cmd_notify_wait(adapter, &wrb);
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 675cbacef772..b2476dbfd103 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -263,8 +263,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK;
- read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
+ &read_cmd.dma, GFP_ATOMIC);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -288,8 +288,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
break;
}
}
- pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
return status;
}
@@ -825,8 +825,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
- &ddrdma_cmd.dma, GFP_KERNEL);
+ ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ ddrdma_cmd.size, &ddrdma_cmd.dma,
+ GFP_KERNEL);
if (!ddrdma_cmd.va)
return -ENOMEM;
@@ -948,8 +949,9 @@ static int be_read_eeprom(struct net_device *netdev,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
- &eeprom_cmd.dma, GFP_KERNEL);
+ eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+ eeprom_cmd.size, &eeprom_cmd.dma,
+ GFP_KERNEL);
if (!eeprom_cmd.va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1365a56f78df..c0f34845cf59 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4855,8 +4855,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -4965,8 +4965,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
}
flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
- GFP_KERNEL);
+ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -5521,16 +5521,15 @@ static int be_drv_init(struct be_adapter *adapter)
int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
- &mbox_mem_alloc->dma,
- GFP_KERNEL);
+ mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va)
return -ENOMEM;
mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
- memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a583d89b13c4..a15663ad7f5e 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
writel(tmp, fep->hwp + FEC_ATIME_INC);
+ corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
writel(corr_period, fep->hwp + FEC_ATIME_CORR);
/* dummy read to update the timer. */
timecounter_read(&fep->tc);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9b3639eae676..c5f299d74549 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -86,7 +86,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
struct net_device *dev = fep->ndev;
const struct fs_platform_info *fpi = fep->fpi;
cbd_t __iomem *bdp;
- struct sk_buff *skb, *skbn, *skbt;
+ struct sk_buff *skb, *skbn;
int received = 0;
u16 pkt_len, sc;
int curidx;
@@ -161,10 +161,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skb_reserve(skbn, 2); /* align IP header */
skb_copy_from_linear_data(skb,
skbn->data, pkt_len);
- /* swap */
- skbt = skb;
- skb = skbn;
- skbn = skbt;
+ swap(skb, skbn);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b9df0cbd0a38..b60a34d982a9 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2999,7 +2999,7 @@ static struct platform_driver emac_driver = {
static void __init emac_make_bootlist(void)
{
struct device_node *np = NULL;
- int j, max, i = 0, k;
+ int j, max, i = 0;
int cell_indices[EMAC_BOOT_LIST_SIZE];
/* Collect EMACs */
@@ -3026,12 +3026,8 @@ static void __init emac_make_bootlist(void)
for (i = 0; max > 1 && (i < (max - 1)); i++)
for (j = i; j < max; j++) {
if (cell_indices[i] > cell_indices[j]) {
- np = emac_boot_list[i];
- emac_boot_list[i] = emac_boot_list[j];
- emac_boot_list[j] = np;
- k = cell_indices[i];
- cell_indices[i] = cell_indices[j];
- cell_indices[j] = k;
+ swap(emac_boot_list[i], emac_boot_list[j]);
+ swap(cell_indices[i], cell_indices[j]);
}
}
}
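
This hunk, like the fs_enet one above, replaces an open-coded three-assignment exchange (and its temporary variable) with the kernel's swap() macro from <linux/kernel.h>. A userspace rendition of what the macro expands to (typeof keeps it type-generic):

#include <stdio.h>

/* Equivalent of the kernel's swap(): a typeof-based temporary makes it
 * work for any pair of assignable lvalues of the same type.
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	int x = 1, y = 2;
	const char *p = "first", *q = "second";

	swap(x, y);
	swap(p, q);
	printf("x=%d y=%d p=%s q=%s\n", x, y, p, q);	/* x=2 y=1 ... */
	return 0;
}
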
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4b9d9f88af70..c6dc9683429e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -124,7 +124,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
char *p = (char *)data;
- int i;
+ unsigned int i;
switch (stringset) {
case ETH_SS_TEST:
@@ -143,12 +143,13 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
- if (interface->hw.mac.type != fm10k_mac_vf)
+ if (interface->hw.mac.type != fm10k_mac_vf) {
for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ }
for (i = 0; i < interface->hw.mac.max_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5b08e6284a3c..94571e6e790c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -400,11 +400,31 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
+static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
+ struct fm10k_vf_info *vf_info)
+{
+ struct fm10k_hw *hw = &interface->hw;
+
+ /* assigning the MAC address will send a mailbox message */
+ fm10k_mbx_lock(interface);
+
+ /* disable LPORT for this VF which clears switch rules */
+ hw->iov.ops.reset_lport(hw, vf_info);
+
+ /* assign new MAC+VLAN for this VF */
+ hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
+
+ /* re-enable the LPORT for this VF */
+ hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
+ FM10K_VF_FLAG_MULTI_CAPABLE);
+
+ fm10k_mbx_unlock(interface);
+}
+
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
- struct fm10k_hw *hw = &interface->hw;
struct fm10k_vf_info *vf_info;
/* verify SR-IOV is active and that vf idx is valid */
@@ -419,13 +439,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
vf_info = &iov_data->vf_info[vf_idx];
ether_addr_copy(vf_info->mac, mac);
- /* assigning the MAC will send a mailbox message so lock is needed */
- fm10k_mbx_lock(interface);
-
- /* assign MAC address to VF */
- hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
-
- fm10k_mbx_unlock(interface);
+ fm10k_reset_vf_info(interface, vf_info);
return 0;
}
@@ -455,16 +469,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
/* record default VLAN ID for VF */
vf_info->pf_vid = vid;
- /* assigning the VLAN will send a mailbox message so lock is needed */
- fm10k_mbx_lock(interface);
-
/* Clear the VLAN table for the VF */
hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
- /* Update VF assignment and trigger reset */
- hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
-
- fm10k_mbx_unlock(interface);
+ fm10k_reset_vf_info(interface, vf_info);
return 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index c754b2027281..982fdcdc795b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -269,16 +269,19 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
+ unsigned int pull_len;
- if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
+ if (likely(size <= FM10K_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -290,8 +293,21 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes so that skb_pad() can pad frames shorter than 60 bytes.
+ */
+ pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
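
The restructured fm10k_add_rx_frag() absorbs the old fm10k_pull_tail() (removed further down): eth_get_headlen() sizes how much protocol header to copy into the skb's linear area, the copy is padded to a long boundary purely to speed up the memcpy, and only the remaining payload is attached as a page fragment at the advanced offset. The arithmetic of the split, with eth_get_headlen() mocked by a fixed value:

#include <stdio.h>

/* Round up to a power-of-two boundary, like the kernel's ALIGN(). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int size = 1514;	/* bytes landed in the page buffer */
	unsigned int pull_len = 54;	/* mocked eth_get_headlen() result */

	/* headers go to the linear area; the copy length is padded to a
	 * long boundary although the skb only grows by pull_len */
	unsigned int copy_len = ALIGN_UP(pull_len, sizeof(long));

	/* everything after the headers stays in the page fragment */
	unsigned int frag_len = size - pull_len;

	printf("pull=%u copy=%u frag=%u\n", pull_len, copy_len, frag_len);
	return 0;
}
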
@@ -518,44 +534,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
}
/**
- * fm10k_pull_tail - fm10k specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
- *
- * This function is an fm10k specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void fm10k_pull_tail(struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* fm10k_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -580,10 +558,6 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
return true;
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- fm10k_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 1b2738380518..1a4b52637de9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1259,16 +1259,11 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
struct fm10k_mbx_info *mbx)
{
const u32 *hdr = &mbx->mbx_hdr;
- s32 err_no;
u16 head;
/* we will need to pull all of the fields for verification */
head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
- /* we only have lower 10 bits of error number so add upper bits */
- err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
- err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
-
switch (mbx->state) {
case FM10K_STATE_OPEN:
case FM10K_STATE_DISCONNECT:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2f4f41b7eae7..99228bf46c12 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -923,18 +923,12 @@ static int __fm10k_mc_sync(struct net_device *dev,
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
- s32 err;
-
- if (!is_multicast_ether_addr(addr))
- return -EADDRNOTAVAIL;
/* update table with current entries */
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
- err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
- if (err)
- return err;
+ hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
}
return 0;
@@ -1339,8 +1333,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
dglort.glort = interface->glort;
- if (l2_accel)
- dglort.shared_l = fls(l2_accel->size);
+ dglort.shared_l = fls(l2_accel->size);
hw->mac.ops.configure_dglort_map(hw, &dglort);
/* If table is empty remove it */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index df9fda38bdd1..ce53ff25f88d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1559,6 +1559,7 @@ void fm10k_down(struct fm10k_intfc *interface)
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
+ fm10k_clean_all_rx_rings(interface);
}
/**
@@ -1740,30 +1741,18 @@ static int fm10k_probe(struct pci_dev *pdev,
struct fm10k_intfc *interface;
struct fm10k_hw *hw;
int err;
- u64 dma_mask;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- /* By default fm10k only supports a 48 bit DMA mask */
- dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
-
- if ((dma_mask <= DMA_BIT_MASK(32)) ||
- dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
- dma_mask &= DMA_BIT_MASK(32);
-
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: %d\n", err);
+ goto err_dma;
}
err = pci_request_selected_regions(pdev,
@@ -1772,7 +1761,7 @@ static int fm10k_probe(struct pci_dev *pdev,
fm10k_driver_name);
if (err) {
dev_err(&pdev->dev,
- "pci_request_selected_regions failed 0x%x\n", err);
+ "pci_request_selected_regions failed: %d\n", err);
goto err_pci_reg;
}
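
The probe cleanup above lands on the standard two-step DMA-mask idiom: request the device's native 48-bit mask, fall back to 32 bits, and fail the probe only when both are rejected; dma_set_mask_and_coherent() covers the streaming and coherent masks in a single call. The shape of the idiom as a kernel-style sketch (setup_dma is a hypothetical helper name):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Prefer the hardware's native 48-bit mask (the fm10k limit noted in
 * the removed comment) and degrade gracefully to 32 bits.
 */
static int setup_dma(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
	return err;
}
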
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 891e21874b2a..3ca0233b3ea2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1046,6 +1046,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
}
+ /* repeat the first ring for all the remaining VF rings */
+ for (i = queues_per_pool; i < qmap_stride; i++) {
+ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
+ fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
+ }
+
return 0;
}
@@ -1345,6 +1351,14 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1, false);
+ /* we need to clear VF_FLAG_ENABLED flags in order to ensure
+ * that we actually re-enable the LPORT state below. Note that
+ * this has no impact if the VF is already disabled, as the
+ * flags are already cleared.
+ */
+ if (!err)
+ vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
+
/* when enabling the port we should reset the rate limiters */
hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
@@ -1786,8 +1800,8 @@ static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
return FM10K_ERR_PARAM;
- if (ppb < 0)
- systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE;
+ if (ppb > 0)
+ systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 7ab1db4fff32..40a0dbc62a04 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -81,26 +81,26 @@ struct fm10k_mac_update {
__le16 glort;
u8 flags;
u8 action;
-};
+} __packed;
struct fm10k_global_table_data {
__le32 used;
__le32 avail;
-};
+} __packed;
struct fm10k_swapi_error {
__le32 status;
struct fm10k_global_table_data mac;
struct fm10k_global_table_data nexthop;
struct fm10k_global_table_data ffu;
-};
+} __packed;
struct fm10k_swapi_1588_timestamp {
__le64 egress;
__le64 ingress;
__le16 dglort;
__le16 sglort;
-};
+} __packed;
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 9043633c3e50..b4945e8abe03 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -70,16 +70,16 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
* if none are present then insert the skb at the tail of the list
*/
skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
- if (!skb)
+ if (!skb) {
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
__skb_queue_tail(list, clone);
+ }
spin_unlock_irqrestore(&list->lock, flags);
/* if the list already has one then we just free the clone */
if (skb)
- kfree_skb(skb);
- else
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ dev_kfree_skb(clone);
}
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
@@ -103,9 +103,10 @@ void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
if (!skb)
return;
- /* timestamp the sk_buff and return it to the socket */
+ /* timestamp the sk_buff and free our copy */
fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
- skb_complete_tx_timestamp(skb, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 4af96686c584..2a17d82fa37d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -369,7 +369,7 @@ struct fm10k_hw;
/* Registers contained in BAR 4 for Switch management */
#define FM10K_SW_SYSTIME_ADJUST 0x0224D
#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000
+#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
enum fm10k_int_source {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index aca9cef50d81..ec76c3fa3a04 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -318,6 +318,7 @@ struct i40e_pf {
#endif
#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 34170eabca7d..da0faf478af0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ /* By default we are in VEPA mode; if this is the first VF/VMDq
+ * VSI to be added, switch to VEB mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf,
+ BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ }
+
vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
if (vsi)
dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0a3e928a2b00..52d7d8b8f1f9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6107,6 +6107,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
i40e_config_bridge_mode(veb);
/* create the remaining VSIs attached to this VEB */
@@ -8038,7 +8042,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
} else if (mode != veb->bridge_mode) {
/* Existing HW bridge but different mode needs reset */
veb->bridge_mode = mode;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+ if (mode == BRIDGE_MODE_VEB)
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
break;
}
}
@@ -8350,11 +8359,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+ (i40e_is_vsi_uplink_mode_veb(vsi))) {
ctxt.info.valid_sections |=
- cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id =
- cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -8753,6 +8763,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
__func__);
return NULL;
}
+ /* We come up in VEPA mode by default unless SRIOV is
+ * already enabled, in which case we can't force VEPA
+ * mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ }
i40e_config_bridge_mode(veb);
}
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9863,6 +9881,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
+#ifdef CONFIG_PCI_IOV
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ if (pci_num_vf(pdev))
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ }
+#endif
err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index cc82a7ffacb0..9a4f2bc70cd2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2419,14 +2419,12 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @tx_flags: collected send information
- * @hdr_len: size of the packet header
*
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
- const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
struct skb_frag_struct *frag;
bool linearize = false;
@@ -2438,7 +2436,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
gso_segs = skb_shinfo(skb)->gso_segs;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
- u16 j = 1;
+ u16 j = 0;
if (num_frags < (I40E_MAX_BUFFER_TXD))
goto linearize_chk_done;
@@ -2449,21 +2447,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
goto linearize_chk_done;
}
frag = &skb_shinfo(skb)->frags[0];
- size = hdr_len;
/* we might still have more fragments per segment */
do {
size += skb_frag_size(frag);
frag++; j++;
+ if ((size >= skb_shinfo(skb)->gso_size) &&
+ (j < I40E_MAX_BUFFER_TXD)) {
+ size = (size % skb_shinfo(skb)->gso_size);
+ j = (size) ? 1 : 0;
+ }
if (j == I40E_MAX_BUFFER_TXD) {
- if (size < skb_shinfo(skb)->gso_size) {
- linearize = true;
- break;
- }
- j = 1;
- size -= skb_shinfo(skb)->gso_size;
- if (size)
- j++;
- size += hdr_len;
+ linearize = true;
+ break;
}
num_frags--;
} while (num_frags);
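
The reworked check counts, per GSO segment, how many descriptors (j) the accumulated fragment bytes (size) would occupy: crossing a segment boundary folds size back to the spill-over remainder and j to 0 or 1, and reaching I40E_MAX_BUFFER_TXD descriptors before a segment completes means the hardware's 8-fragment limit would be exceeded, so the skb must be linearized. A standalone rendition of the TSO branch (plain arrays stand in for skb fragments; the constant mirrors the driver's limit):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD 8	/* HW scatter-gather limit per packet */

/* Walk the fragments, counting descriptors consumed by the current GSO
 * segment; fold the counters at each segment boundary, exactly as the
 * hunk above does.
 */
static bool needs_linearize(const unsigned int *frag_size, int num_frags,
			    unsigned int gso_size)
{
	unsigned int size = 0;
	int i, j = 0;

	if (num_frags < MAX_BUFFER_TXD)
		return false;

	for (i = 0; i < num_frags; i++) {
		size += frag_size[i];
		j++;
		if (size >= gso_size && j < MAX_BUFFER_TXD) {
			size %= gso_size;	/* bytes spilling over */
			j = size ? 1 : 0;	/* ...still need a desc */
		}
		if (j == MAX_BUFFER_TXD)
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int tiny[16];
	int i;

	for (i = 0; i < 16; i++)
		tiny[i] = 64;	/* sixteen 64-byte fragments */

	/* nine tiny frags never complete a 1448-byte segment: linearize */
	printf("%d\n", needs_linearize(tiny, 16, 1448));
	return 0;
}
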
@@ -2730,7 +2725,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (i40e_chk_linearize(skb, tx_flags))
if (skb_linearize(skb))
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4653b6e653c9..23f95cdbdfcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1025,11 +1025,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
- if (num_vfs)
+ if (num_vfs) {
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf,
+ BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ }
return i40e_pci_sriov_enable(pdev, num_vfs);
+ }
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ec7e220757db..f54996f19629 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1608,14 +1608,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @tx_flags: collected send information
- * @hdr_len: size of the packet header
*
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
- const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
struct skb_frag_struct *frag;
bool linearize = false;
@@ -1627,7 +1625,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
gso_segs = skb_shinfo(skb)->gso_segs;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
- u16 j = 1;
+ u16 j = 0;
if (num_frags < (I40E_MAX_BUFFER_TXD))
goto linearize_chk_done;
@@ -1638,21 +1636,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
goto linearize_chk_done;
}
frag = &skb_shinfo(skb)->frags[0];
- size = hdr_len;
/* we might still have more fragments per segment */
do {
size += skb_frag_size(frag);
frag++; j++;
+ if ((size >= skb_shinfo(skb)->gso_size) &&
+ (j < I40E_MAX_BUFFER_TXD)) {
+ size = (size % skb_shinfo(skb)->gso_size);
+ j = (size) ? 1 : 0;
+ }
if (j == I40E_MAX_BUFFER_TXD) {
- if (size < skb_shinfo(skb)->gso_size) {
- linearize = true;
- break;
- }
- j = 1;
- size -= skb_shinfo(skb)->gso_size;
- if (size)
- j++;
- size += hdr_len;
+ linearize = true;
+ break;
}
num_frags--;
} while (num_frags);
@@ -1940,7 +1935,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
- if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (i40e_chk_linearize(skb, tx_flags))
if (skb_linearize(skb))
goto out_drop;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e3b9b63ad010..c3a9392cbc19 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
- wr32(trgttiml, rq->perout.start.sec);
- wr32(trgttimh, rq->perout.start.nsec);
+ wr32(trgttimh, rq->perout.start.sec);
+ wr32(trgttiml, rq->perout.start.nsec);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 636f9e350162..ac3ac2a20386 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -643,6 +643,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 824a7ab79ab6..65db69b862fb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1225,7 +1225,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
- .check_overtemp = &ixgbe_tn_check_overtemp,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82598_info = {
@@ -1234,4 +1234,5 @@ struct ixgbe_info ixgbe_82598_info = {
.mac_ops = &mac_ops_82598,
.eeprom_ops = &eeprom_ops_82598,
.phy_ops = &phy_ops_82598,
+ .mvals = ixgbe_mvals_8259X,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e0c363948bf4..6b87d9634614 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -71,7 +71,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
return false;
@@ -79,7 +79,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
if (!(manc & IXGBE_MANC_RCV_TCO_EN))
return false;
- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (factps & IXGBE_FACTPS_MNGCG)
return false;
@@ -510,7 +510,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
/* Check to see if MNG FW could be enabled */
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
!hw->wol_enabled &&
@@ -2378,4 +2378,5 @@ struct ixgbe_info ixgbe_82599_info = {
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_8259X,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 06d8f3cfa099..4c1c26732b67 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -57,6 +57,11 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+/* Base table for registers values that change by MAC */
+const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(8259X)
+};
+
/**
* ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
* control
@@ -91,6 +96,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
default:
@@ -463,7 +470,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
@@ -681,7 +688,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
bus->lan_id = bus->func;
/* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
}
@@ -799,7 +806,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* Check for EEPROM present first.
* If not present leave as none
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (eec & IXGBE_EEC_PRES) {
eeprom->type = ixgbe_eeprom_spi;
@@ -1283,14 +1290,14 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
return IXGBE_ERR_SWFW_SYNC;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/* Request EEPROM Access */
eec |= IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (eec & IXGBE_EEC_GNT)
break;
udelay(5);
@@ -1299,7 +1306,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Release if grant not acquired */
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
hw_dbg(hw, "Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1309,7 +1316,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Setup EEPROM for Read/Write */
/* Clear CS and SK */
eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
return 0;
@@ -1333,7 +1340,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
break;
usleep_range(50, 100);
@@ -1353,7 +1360,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SMBI) {
hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
return IXGBE_ERR_EEPROM;
@@ -1362,16 +1369,16 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the SWESMBI bit */
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
/* Set the SW EEPROM semaphore bit to request access */
swsm |= IXGBE_SWSM_SWESMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
/* If we set the bit successfully then we got the
* semaphore.
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SWESMBI)
break;
@@ -1400,11 +1407,11 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 swsm;
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
@@ -1454,15 +1461,15 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/* Toggle CS to flush commands */
eec |= IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
eec &= ~IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1480,7 +1487,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u32 mask;
u32 i;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/*
* Mask is used to shift "count" bits of "data" out to the EEPROM
@@ -1501,7 +1508,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
else
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
@@ -1518,7 +1525,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
}
@@ -1539,7 +1546,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
* the value of the "DO" bit. During this "shifting in" process the
* "DI" bit should always be clear.
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
@@ -1547,7 +1554,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
data = data << 1;
ixgbe_raise_eeprom_clk(hw, &eec);
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec &= ~(IXGBE_EEC_DI);
if (eec & IXGBE_EEC_DO)
@@ -1571,7 +1578,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* (setting the SK bit), then delay
*/
*eec = *eec | IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1588,7 +1595,7 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* delay
*/
*eec = *eec & ~IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1601,19 +1608,19 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec |= IXGBE_EEC_CS; /* Pull CS high */
eec &= ~IXGBE_EEC_SK; /* Lower SCK */
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f21f8a165ec4..ec015fed8fa7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -118,6 +118,8 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
+extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
+
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9a1d0f142b09..ec7b2324b77b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -207,6 +207,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
+ case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_cu_unknown:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -470,16 +471,16 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
/* NVM Register */
- regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
- regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
- regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 23d82b34314e..3bf2f3cfd9f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -81,6 +81,8 @@ const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2014 Intel Corporation.";
+static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
+
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
@@ -131,6 +133,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -2366,7 +2369,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
* - We may have missed the interrupt so always have to
* check if we got a LSC
*/
- if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+ if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
!(eicr & IXGBE_EICR_LSC))
return;
@@ -2386,14 +2389,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
break;
default:
- if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ if (adapter->hw.mac.type >= ixgbe_mac_X540)
+ return;
+ if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
return;
break;
}
- e_crit(drv,
- "Network adapter has been stopped because it has over heated. "
- "Restart the computer. If the problem persists, "
- "power off the system and replace the adapter\n");
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
adapter->interrupt_event = 0;
}
@@ -2403,15 +2405,17 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
- (eicr & IXGBE_EICR_GPI_SDP1)) {
+ (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
e_crit(probe, "Fan has stopped, replace the adapter\n");
/* write to clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
}
}
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+
if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
return;
@@ -2421,7 +2425,8 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
* Need to check link state so complete overtemp check
* on service task
*/
- if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
+ if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
+ (eicr & IXGBE_EICR_LSC)) &&
(!test_bit(__IXGBE_DOWN, &adapter->state))) {
adapter->interrupt_event = eicr;
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -2437,28 +2442,46 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
return;
}
- e_crit(drv,
- "Network adapter has been stopped because it has over heated. "
- "Restart the computer. If the problem persists, "
- "power off the system and replace the adapter\n");
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ if (hw->phy.type == ixgbe_phy_nl)
+ return true;
+ return false;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
- if (eicr & IXGBE_EICR_GPI_SDP2) {
+ if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
ixgbe_service_event_schedule(adapter);
}
}
- if (eicr & IXGBE_EICR_GPI_SDP1) {
+ if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
ixgbe_service_event_schedule(adapter);
@@ -2543,6 +2566,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
/* don't reenable LSC while waiting for link */
@@ -2552,7 +2576,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- mask |= IXGBE_EIMS_GPI_SDP0;
+ mask |= IXGBE_EIMS_GPI_SDP0(hw);
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
@@ -2563,15 +2587,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP1(hw);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
+ mask |= IXGBE_EIMS_GPI_SDP1(hw);
+ mask |= IXGBE_EIMS_GPI_SDP2(hw);
/* fall through */
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
+ mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_MAILBOX;
break;
@@ -2626,6 +2652,13 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
+ adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP0_X540);
+ }
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -4703,32 +4736,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_dfwd(adapter);
}
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- case ixgbe_phy_sfp_active_unknown:
- case ixgbe_phy_sfp_ftl_active:
- case ixgbe_phy_qsfp_passive_unknown:
- case ixgbe_phy_qsfp_active_unknown:
- case ixgbe_phy_qsfp_intel:
- case ixgbe_phy_qsfp_unknown:
- /* ixgbe_phy_none is set when no SFP module is present */
- case ixgbe_phy_none:
- return true;
- case ixgbe_phy_nl:
- if (hw->mac.type == ixgbe_mac_82598EB)
- return true;
- default:
- return false;
- }
-}
-
/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
@@ -4833,7 +4840,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- gpie |= IXGBE_SDP0_GPIEN;
+ gpie |= IXGBE_SDP0_GPIEN_8259X;
break;
case ixgbe_mac_X540:
gpie |= IXGBE_EIMS_TS;
@@ -4845,11 +4852,11 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
/* Enable fan failure interrupt */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN(hw);
if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie |= IXGBE_SDP1_GPIEN;
- gpie |= IXGBE_SDP2_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN_8259X;
+ gpie |= IXGBE_SDP2_GPIEN_8259X;
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -4873,6 +4880,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ if (hw->phy.ops.set_phy_power)
+ hw->phy.ops.set_phy_power(hw, true);
+
smp_mb__before_atomic();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4992,6 +5002,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
+
+ if (hw->phy.ops.set_phy_power) {
+ if (!netif_running(adapter->netdev) && !adapter->wol)
+ hw->phy.ops.set_phy_power(hw, false);
+ else
+ hw->phy.ops.set_phy_power(hw, true);
+ }
}
/**
@@ -5260,7 +5277,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
@@ -5672,6 +5689,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
/* disallow open during test */
@@ -5729,6 +5747,8 @@ err_set_queues:
ixgbe_free_irq(adapter);
err_req_irq:
ixgbe_free_all_rx_resources(adapter);
+ if (hw->phy.ops.set_phy_power && !adapter->wol)
+ hw->phy.ops.set_phy_power(&adapter->hw, false);
err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
@@ -5889,6 +5909,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
*enable_wake = !!wufc;
+ if (hw->phy.ops.set_phy_power && !*enable_wake)
+ hw->phy.ops.set_phy_power(hw, false);
ixgbe_release_hw_control(adapter);
@@ -6718,6 +6740,26 @@ static void ixgbe_service_timer(unsigned long data)
ixgbe_service_event_schedule(adapter);
}
+static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
+
+ if (!hw->phy.ops.handle_lasi)
+ return;
+
+ status = hw->phy.ops.handle_lasi(&adapter->hw);
+ if (status != IXGBE_ERR_OVERTEMP)
+ return;
+
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
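
The new PHY interrupt path is deliberately two-stage: the MSI-X handler earlier in this patch only latches a flag and schedules the service task, because handle_lasi needs MDIO accesses that are far too slow for hard-irq context. Condensed, the flow added here looks like:

	/* hard-irq context (ixgbe_msix_other): latch and defer */
	if (eicr & IXGBE_EICR_GPI_SDP0_X540) {
		adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
		ixgbe_service_event_schedule(adapter);
	}

	/* process context (ixgbe_service_task): MDIO is safe here */
	ixgbe_phy_interrupt_subtask(adapter);	/* -> phy.ops.handle_lasi */
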
+
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
@@ -6759,6 +6801,7 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
ixgbe_reset_subtask(adapter);
+ ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
ixgbe_check_overtemp_subtask(adapter);
@@ -8291,6 +8334,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
/* check eeprom to see if enabled wol */
if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
@@ -8431,10 +8478,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
+ hw->mvals = ii->mvals;
/* EEPROM */
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index af828f89419f..526a20bf7488 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -347,6 +347,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case X550_PHY_ID:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -356,6 +357,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case ATH_PHY_ID:
phy_type = ixgbe_phy_nl;
break;
+ case X557_PHY_ID:
+ phy_type = ixgbe_phy_x550em_ext_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -1348,6 +1352,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1361,9 +1368,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
hw->phy.id = identifier;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
&comp_codes_10g);
@@ -1793,7 +1797,7 @@ fail:
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1822,7 +1826,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1880,9 +1884,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
}
/* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
- i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -1898,7 +1902,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
s32 status = 0;
u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 timeout = 10;
bool ack = true;
@@ -1911,7 +1915,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Poll for ACK. Note that ACK in I2C spec is
* transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ack = ixgbe_get_i2c_data(hw, &i2cctl);
udelay(1);
@@ -1941,14 +1945,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
*data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1969,7 +1973,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2005,14 +2009,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
u32 i2cctl_r = 0;
for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ *i2cctl |= IXGBE_I2C_CLK_OUT(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
- if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
break;
}
}
@@ -2027,9 +2031,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -2047,18 +2051,18 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ *i2cctl |= IXGBE_I2C_DATA_OUT(hw);
else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
return IXGBE_ERR_I2C;
@@ -2076,7 +2080,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
**/
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
- if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+ if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
return true;
return false;
}
@@ -2090,7 +2094,7 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 i;
ixgbe_i2c_start(hw);
@@ -2137,3 +2141,36 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
return IXGBE_ERR_OVERTEMP;
}
+
+/** ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ **/
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ u32 status;
+ u16 reg;
+
+ /* Bail if we don't have a copper PHY */
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return 0;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ if (on) {
+ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ }
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 434643881287..e45988c4dad5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,6 +145,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dd6ba5916dfe..b6f424f3b1a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -91,14 +91,24 @@
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_CAT(r, m) IXGBE_##r##_##m
+
+#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)])
+
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_STATUS 0x00008
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- 0x15F5C : 0x00028))
+
+#define IXGBE_I2CCTL_8259X 0x00028
+#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X
+#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL)
+
#define IXGBE_LEDCTL 0x00200
#define IXGBE_FRTIMER 0x00048
#define IXGBE_TCPTIMER 0x0004C
@@ -106,17 +116,39 @@
#define IXGBE_EXVET 0x05078
/* NVM Registers */
-#define IXGBE_EEC 0x10010
+#define IXGBE_EEC_8259X 0x10010
+#define IXGBE_EEC_X540 IXGBE_EEC_8259X
+#define IXGBE_EEC_X550 IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_a 0x15FF8
+#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC)
#define IXGBE_EERD 0x10014
#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
+#define IXGBE_FLA_8259X 0x1001C
+#define IXGBE_FLA_X540 IXGBE_FLA_8259X
+#define IXGBE_FLA_X550 IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
#define IXGBE_FLMNGCTL 0x10118
#define IXGBE_FLMNGDATA 0x1011C
#define IXGBE_FLMNGCNT 0x10120
#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
+#define IXGBE_GRC_8259X 0x10200
+#define IXGBE_GRC_X540 IXGBE_GRC_8259X
+#define IXGBE_GRC_X550 IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_a 0x15F64
+#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
+
+#define IXGBE_SRAMREL_8259X 0x10210
+#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_a 0x15F6C
+#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
@@ -126,14 +158,55 @@
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00004000 : 0x00000001)
-#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000200 : 0x00000002)
-#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00001000 : 0x00000004)
-#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000400 : 0x00000008)
+#define IXGBE_I2C_CLK_IN_8259X 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X
+#define IXGBE_I2C_CLK_IN_X550 0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
+#define IXGBE_I2C_CLK_OUT_8259X 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X
+#define IXGBE_I2C_CLK_OUT_X550 0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
+#define IXGBE_I2C_DATA_IN_8259X 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X
+#define IXGBE_I2C_DATA_IN_X550 0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
+#define IXGBE_I2C_DATA_OUT_8259X 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X
+#define IXGBE_I2C_DATA_OUT_X550 0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN_8259X 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X
+#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN_8259X 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X
+#define IXGBE_I2C_BB_EN_X550 0x00000100
+#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+
+#define IXGBE_I2C_CLK_OE_N_EN_8259X 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X
+#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
+
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -835,15 +908,36 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GSCN_1 0x11024
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
-#define IXGBE_FACTPS 0x10150
+#define IXGBE_FACTPS_8259X 0x10150
+#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_a 0x15FEC
+#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS)
+
#define IXGBE_PCIEANACTL 0x11040
-#define IXGBE_SWSM 0x10140
-#define IXGBE_FWSM 0x10148
+#define IXGBE_SWSM_8259X 0x10140
+#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_a 0x15F70
+#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM)
+#define IXGBE_FWSM_8259X 0x10148
+#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_a 0x15F74
+#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM)
#define IXGBE_GSSR 0x10160
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78
+#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC)
/* PCIe registers 82599-specific */
#define IXGBE_GCR_EXT 0x11050
@@ -855,14 +949,21 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_CIAA_X550 0x11508
-#define IXGBE_CIAD_X550 0x11510
-#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
-#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+
+#define IXGBE_CIAA_8259X 0x11088
+#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
+#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
+#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA)
+
+#define IXGBE_CIAD_8259X 0x1108C
+#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
+#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD)
+
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1204,18 +1305,37 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */
+#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide flag */
+#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
+/* autoneg vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000
+#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
+#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
@@ -1233,6 +1353,8 @@ struct ixgbe_thermal_sensor_data {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
+#define X557_PHY_ID 0x01540240
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
@@ -1253,9 +1375,25 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_CONTROL_SOL_NL 0x0000
/* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
@@ -1417,9 +1555,25 @@ enum {
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000
+#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
@@ -1435,9 +1589,9 @@ enum {
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
@@ -1454,9 +1608,9 @@ enum {
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
@@ -1472,9 +1626,9 @@ enum {
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
@@ -2741,6 +2895,37 @@ union ixgbe_atr_hash_dword {
__be32 dword;
};
+#define IXGBE_MVALS_INIT(m) \
+ IXGBE_CAT(EEC, m), \
+ IXGBE_CAT(FLA, m), \
+ IXGBE_CAT(GRC, m), \
+ IXGBE_CAT(SRAMREL, m), \
+ IXGBE_CAT(FACTPS, m), \
+ IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
+ IXGBE_CAT(FWSM, m), \
+ IXGBE_CAT(SDP0_GPIEN, m), \
+ IXGBE_CAT(SDP1_GPIEN, m), \
+ IXGBE_CAT(SDP2_GPIEN, m), \
+ IXGBE_CAT(EICR_GPI_SDP0, m), \
+ IXGBE_CAT(EICR_GPI_SDP1, m), \
+ IXGBE_CAT(EICR_GPI_SDP2, m), \
+ IXGBE_CAT(CIAA, m), \
+ IXGBE_CAT(CIAD, m), \
+ IXGBE_CAT(I2C_CLK_IN, m), \
+ IXGBE_CAT(I2C_CLK_OUT, m), \
+ IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \
+ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \
+ IXGBE_CAT(I2C_BB_EN, m), \
+ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \
+ IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+ IXGBE_MVALS_INIT(IDX),
+ IXGBE_MVALS_IDX_LIMIT
+};
+
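
Taken together, IXGBE_CAT, IXGBE_BY_MAC, IXGBE_MVALS_INIT and this enum turn the old per-register ternaries into a single table lookup. An illustrative expansion for the EEC register:

	/* enum:     IXGBE_MVALS_INIT(IDX)  => IXGBE_EEC_IDX, IXGBE_FLA_IDX, ...   */
	/* table:    IXGBE_MVALS_INIT(X540) => IXGBE_EEC_X540, IXGBE_FLA_X540, ... */
	/* accessor: IXGBE_EEC(hw) => IXGBE_BY_MAC(hw, EEC)
	 *                         => hw->mvals[IXGBE_EEC_IDX]                     */
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	/* resolves to 0x10010 on X540 but 0x15FF8 on X550EM_a */
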
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -3112,6 +3297,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw);
};
struct ixgbe_eeprom_info {
@@ -3173,6 +3360,7 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
+ u32 nw_mng_if_sel;
};
#include "ixgbe_mbx.h"
@@ -3216,6 +3404,7 @@ struct ixgbe_hw {
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
struct ixgbe_mbx_info mbx;
+ const u32 *mvals;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -3234,6 +3423,7 @@ struct ixgbe_info {
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
struct ixgbe_mbx_operations *mbx_ops;
+ const u32 *mvals;
};
@@ -3339,4 +3529,6 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
+#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 0a8b5e42e1a9..032a5870abd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -202,7 +202,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -504,8 +504,8 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
return status;
}
- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == 0)
@@ -514,11 +514,11 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
hw_dbg(hw, "Flash update time out\n");
if (hw->revision_id == 0) {
- flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
@@ -544,7 +544,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
u32 reg;
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (reg & IXGBE_EEC_FLUDONE)
return 0;
udelay(5);
@@ -580,10 +580,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
break;
} else {
@@ -605,13 +605,13 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* corresponding FW/HW bits in the SW_FW_SYNC register.
*/
if (i >= timeout) {
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (swfw_sync & (fwmask | hwmask)) {
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
}
}
@@ -635,9 +635,9 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
ixgbe_get_swfw_sync_semaphore(hw);
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
swfw_sync &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
usleep_range(5000, 10000);
@@ -660,7 +660,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
break;
usleep_range(50, 100);
@@ -674,7 +674,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the REGSMP bit */
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swsm & IXGBE_SWFW_REGSMP))
return 0;
@@ -696,13 +696,13 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
swsm &= ~IXGBE_SWFW_REGSMP;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm);
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
@@ -850,9 +850,14 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
+ .set_phy_power = &ixgbe_set_copper_phy_power,
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
+static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X540)
+};
+
struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
@@ -860,4 +865,5 @@ struct ixgbe_info ixgbe_X540_info = {
.eeprom_ops = &eeprom_ops_X540,
.phy_ops = &phy_ops_X540,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X540,
};
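
With .mvals in place, probe copies the table pointer into the hw struct (the `hw->mvals = ii->mvals;` hunk in ixgbe_main.c above), so every per-MAC accessor becomes one array index at run time. A rough sketch of the resolved path:

	/* probe: pick the offset table for this board once */
	hw->mvals = ixgbe_X540_info.mvals;	/* == ixgbe_mvals_X540 */

	/* run time: the accessor is just an index into that table */
	u32 sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
	/* roughly readl(hw->hw_addr + hw->mvals[IXGBE_SWFW_SYNC_IDX]) */
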
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index b0236985e915..7581da13e92a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,22 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
/** ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -33,18 +49,11 @@
*/
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
- u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
- if (hw->bus.lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
- }
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ ixgbe_setup_mux_ctl(hw);
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
@@ -90,7 +99,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -704,111 +713,6 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
return status;
}
-/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
- * @hw: pointer to hardware structure
- **/
-static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
-
- /* CS4227 does not support autoneg, so disable the laser control
- * functions for SFP+ fiber
- */
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
- mac->ops.disable_tx_laser = NULL;
- mac->ops.enable_tx_laser = NULL;
- mac->ops.flap_tx_laser = NULL;
- }
-}
-
-/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
- * @hw: pointer to hardware structure
- */
-static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
-{
- bool setup_linear;
- u16 reg_slice, edc_mode;
- s32 ret_val;
-
- switch (hw->phy.sfp_type) {
- case ixgbe_sfp_type_unknown:
- return 0;
- case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
- case ixgbe_sfp_type_da_cu_core0:
- case ixgbe_sfp_type_da_cu_core1:
- setup_linear = true;
- break;
- case ixgbe_sfp_type_srlr_core0:
- case ixgbe_sfp_type_srlr_core1:
- case ixgbe_sfp_type_da_act_lmt_core0:
- case ixgbe_sfp_type_da_act_lmt_core1:
- case ixgbe_sfp_type_1g_sx_core0:
- case ixgbe_sfp_type_1g_sx_core1:
- setup_linear = false;
- break;
- default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
- }
-
- ixgbe_init_mac_link_ops_X550em(hw);
- hw->phy.ops.reset = NULL;
-
- /* The CS4227 slice address is the base address + the port-pair reg
- * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
- */
- reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
-
- if (setup_linear)
- edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
-
- /* Configure CS4227 for connection type. */
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- edc_mode);
-
- if (ret_val)
- ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
- edc_mode);
-
- return ret_val;
-}
-
-/** ixgbe_get_link_capabilities_x550em - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: true when autoneg or autotry is enabled
- **/
-static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- /* SFP */
- if (hw->phy.media_type == ixgbe_media_type_fiber) {
- /* CS4227 SFP must not enable auto-negotiation */
- *autoneg = false;
-
- if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- return 0;
- }
-
- /* Link capabilities are based on SFP */
- if (hw->phy.multispeed_fiber)
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- else
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- } else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
- }
- return 0;
-}
-
/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
* IOSF device
*
@@ -891,7 +795,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
}
status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status)
return status;
@@ -973,6 +877,417 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return status;
}
+/**
+ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up internal/external PHY link speed based on link speed, then set
+ * external PHY auto-advertised link speed.
+ *
+ * Returns error status for any failure
+ **/
+static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ s32 status;
+ ixgbe_link_speed force_speed;
+
+ /* Set up internal/external PHY link speed to iXFI (10G), unless
+ * only 1G is auto advertised, in which case set up the KX link.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If internal link mode is XFI, then setup XFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
+
+ if (status)
+ return status;
+ }
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/** ixgbe_check_link_t_X550em - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Check that both the MAC and X557 external PHY have link.
+ **/
+static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ u32 status;
+ u16 autoneg_status;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ link_up_wait_to_complete);
+
+ /* If check link fails or MAC link is not up, then return */
+ if (status || !(*link_up))
+ return status;
+
+ /* MAC link is up, so check external PHY link.
+ * The link status bit latches low, so it may need to be read twice
+ * back to back to report the current status.
+ */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ /* If external PHY link is not up, then indicate link not up */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ *link_up = false;
+
+ return 0;
+}
+
+/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ break;
+ case ixgbe_media_type_copper:
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ break;
+ default:
+ break;
+ }
+}
+
+/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+ bool setup_linear;
+ u16 reg_slice, edc_mode;
+ s32 ret_val;
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_unknown:
+ return 0;
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ setup_linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ setup_linear = false;
+ break;
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ ixgbe_init_mac_link_ops_X550em(hw);
+ hw->phy.ops.reset = NULL;
+
+ /* The CS4227 slice address is the base address + the port-pair reg
+ * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
+ */
+ reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
+
+ if (setup_linear)
+ edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+
+ /* Configure CS4227 for connection type. */
+ ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ edc_mode);
+
+ if (ret_val)
+ ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
+ edc_mode);
+
+ return ret_val;
+}
+
+/** ixgbe_get_link_capabilities_x550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ **/
+static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = false;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return 0;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
+ *
+ * Determine whether the external Base T PHY interrupt cause is a high
+ * temperature failure alarm or a link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+{
+ u32 status;
+ u16 reg;
+
+ *lsc = false;
+
+ /* Vendor alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+ return status;
+
+ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+ return status;
+
+ /* High temperature failure alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status)
+ return status;
+
+ /* If high temperature failure, then return over temp error and exit */
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, false);
+ return IXGBE_ERR_OVERTEMP;
+ }
+
+ /* Vendor alarm 2 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+ return status;
+
+ /* link connect/disconnect event occurred */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status)
+ return status;
+
+ /* Indicate LSC */
+ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+ *lsc = true;
+
+ return 0;
+}
+
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
+ *
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
+ **/
+static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+ bool lsc;
+
+ /* Clear any latched interrupt flags by reading them. The return value
+ * is not checked here; status is overwritten by the reads below.
+ */
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ /* Enable link status change alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+ if (status)
+ return status;
+
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+ if (status)
+ return status;
+
+ /* Enable high temperature failure alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+
+ /* Enable chip-wide vendor alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle an external Base T PHY interrupt. If the cause is a high
+ * temperature failure alarm, return an error; if it is a link status
+ * change, set up the internal/external PHY link.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ bool lsc;
+ u32 status;
+
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ if (status)
+ return status;
+
+ if (lsc)
+ return phy->ops.setup_internal_link(hw);
+
+ return 0;
+}
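+
+/* Usage sketch (illustrative only; handle_overtemp() is a hypothetical
+ * helper): a service task that owns the external PHY would typically
+ * dispatch the hook as
+ *
+ *	s32 rc = hw->phy.ops.handle_lasi(hw);
+ *
+ *	if (rc == IXGBE_ERR_OVERTEMP)
+ *		handle_overtemp(adapter);
+ */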
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
* @hw: pointer to hardware structure
*
@@ -1018,85 +1333,82 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- s32 status;
- u32 reg_val;
+ return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
+}
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status)
- return status;
+/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
+ * @hw: address of hardware structure
+ * @link_up: address of boolean to indicate link status
+ *
+ * Returns error code if unable to get link status.
+ **/
+static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+{
+ u32 ret;
+ u16 autoneg_status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+ *link_up = false;
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+ /* the link status bit latches low; read the register twice back to
+ * back to get the current status
+ */
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret)
+ return ret;
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret)
+ return ret;
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
- return status;
+ return 0;
}
-/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
* @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY to talk to the external PHY. The base
- * driver will call this function when it gets notification via interrupt from
- * the external PHY. This function forces the internal PHY into iXFI mode at
- * the correct speed.
+ * Configures the link between the integrated KR PHY and the external X557 PHY.
+ * The driver will call this function when it gets a link status change
+ * interrupt from the X557 PHY. This function configures the link speed
+ * between the PHYs to match the link speed of the BASE-T link.
*
- * A return of a non-zero value indicates an error, and the base driver should
- * not report link up.
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
**/
-static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
- u16 lasi, autoneg_status, speed;
ixgbe_link_speed force_speed;
+ bool link_up;
+ u32 status;
+ u16 speed;
- /* Verify that the external link status has changed */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ /* If the link is not up, no setup is necessary, so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
- /* If there was no change in link status, we can just exit */
- if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+ if (!link_up)
return 0;
- /* we read this twice back to back to indicate current status */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ &speed);
if (status)
return status;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ /* If the link is no longer up, no setup is necessary, so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
- /* If link is not up return an error indicating treat link as down */
- if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &speed);
+ if (!link_up)
+ return 0;
/* clear everything but the speed and duplex bits */
speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
@@ -1116,6 +1428,22 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
return ixgbe_setup_ixfi_x550em(hw, &force_speed);
}
+/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_reset_phy_generic(hw);
+
+ if (status)
+ return status;
+
+ /* Configure Link Status Alarm and Temperature Threshold interrupts */
+ return ixgbe_enable_lasi_ext_t_x550em(hw);
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1126,25 +1454,32 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
+ ixgbe_link_speed speed;
s32 ret_val;
- u32 esdp;
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ hw->mac.ops.set_lan_id(hw);
- if (hw->bus.lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ ixgbe_setup_mux_ctl(hw);
+
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal PHY mode is KR, then initialize KR link */
+ if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- /* Setup function pointers based on detected SFP module and speeds */
+ /* Setup function pointers based on detected hardware */
ixgbe_init_mac_link_ops_X550em(hw);
if (phy->sfp_type != ixgbe_sfp_type_unknown)
phy->ops.reset = NULL;
@@ -1162,11 +1497,30 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_ext_t:
- phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal link mode is XFI, then setup iXFI internal link,
+ * else setup KR now.
+ */
+ if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ phy->ops.setup_internal_link =
+ ixgbe_setup_internal_phy_t_x550em;
+ } else {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
+
+ phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+ phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
default:
break;
}
+
return ret_val;
}
@@ -1207,65 +1561,35 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
s32 status;
u16 reg;
- u32 retries = 2;
-
- do {
- /* decrement retries counter and exit if we hit 0 */
- if (retries < 1) {
- hw_dbg(hw, "External PHY not yet finished resetting.");
- return IXGBE_ERR_PHY;
- }
- retries--;
-
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_TX_VENDOR_ALARMS_3,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &reg);
- if (status)
- return status;
-
- /* Verify PHY FW reset has completed */
- } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
-
- /* Set port to low power mode */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
- if (status)
- return status;
- /* Enable the transmitter */
status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&reg);
if (status)
return status;
- reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+ /* If the PHY FW reset-completed bit is set, this is the first
+ * SW instance after a power-on, so the PHY FW must be un-stalled.
+ */
+ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
- status = hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- reg);
- if (status)
- return status;
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
- /* Un-stall the PHY FW */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
- if (status)
- return status;
-
- reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+ }
- status = hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- reg);
return status;
}
@@ -1282,6 +1606,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
+ u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -1366,6 +1691,15 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ }
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ ixgbe_setup_mux_ctl(hw);
+
return status;
}
@@ -1518,6 +1852,10 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
+ .read_reg = &ixgbe_read_phy_reg_generic, \
+ .write_reg = &ixgbe_write_phy_reg_generic, \
+ .setup_link = &ixgbe_setup_phy_link_generic, \
+ .set_phy_power = &ixgbe_set_copper_phy_power, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
@@ -1525,9 +1863,6 @@ static struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
- .read_reg = &ixgbe_read_phy_reg_generic,
- .write_reg = &ixgbe_write_phy_reg_generic,
- .setup_link = &ixgbe_setup_phy_link_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
};
@@ -1536,9 +1871,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
- .read_reg = NULL, /* defined later */
- .write_reg = NULL, /* defined later */
- .setup_link = NULL, /* defined later */
+};
+
+static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550)
+};
+
+static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_x)
};
struct ixgbe_info ixgbe_X550_info = {
@@ -1548,6 +1888,7 @@ struct ixgbe_info ixgbe_X550_info = {
.eeprom_ops = &eeprom_ops_X550,
.phy_ops = &phy_ops_X550,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550,
};
struct ixgbe_info ixgbe_X550EM_x_info = {
@@ -1557,4 +1898,5 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_X550EM_x,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550EM_x,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 68ae765873a9..82040137d7d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -49,6 +49,7 @@
#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
+#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL
@@ -3166,6 +3167,92 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+ struct mlx4_counter *counter_stats, int reset)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ struct mlx4_counter *tmp_counter;
+ int err;
+ u32 if_stat_in_mod;
+
+ if (!counter_stats)
+ return -EINVAL;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
+ if_stat_in_mod = counter_index;
+ if (reset)
+ if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma,
+ if_stat_in_mod, 0,
+ MLX4_CMD_QUERY_IF_STAT,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
+ __func__, counter_index);
+ goto if_stat_out;
+ }
+ tmp_counter = (struct mlx4_counter *)mailbox->buf;
+ counter_stats->counter_mode = tmp_counter->counter_mode;
+ if (counter_stats->counter_mode == 0) {
+ counter_stats->rx_frames =
+ cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
+ be64_to_cpu(tmp_counter->rx_frames));
+ counter_stats->tx_frames =
+ cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
+ be64_to_cpu(tmp_counter->tx_frames));
+ counter_stats->rx_bytes =
+ cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
+ be64_to_cpu(tmp_counter->rx_bytes));
+ counter_stats->tx_bytes =
+ cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
+ be64_to_cpu(tmp_counter->tx_bytes));
+ }
+
+if_stat_out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
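+
+/* Note: the frame/byte counters are accumulated into @counter_stats rather
+ * than overwritten, so callers zero the structure first. A minimal sketch:
+ *
+ *	struct mlx4_counter stats;
+ *
+ *	memset(&stats, 0, sizeof(stats));
+ *	err = mlx4_get_counter_stats(dev, counter_index, &stats, 0);
+ */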
+
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct mlx4_counter tmp_vf_stats;
+ int slave;
+ int err = 0;
+
+ if (!vf_stats)
+ return -EINVAL;
+
+ if (!mlx4_is_master(dev))
+ return -EPROTONOSUPPORT;
+
+ slave = mlx4_get_slave_indx(dev, vf_idx);
+ if (slave < 0)
+ return -EINVAL;
+
+ port = mlx4_slaves_closest_port(dev, slave, port);
+ err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
+ if (!err && tmp_vf_stats.counter_mode == 0) {
+ vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
+ vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
+ vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
+ vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
+
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a2ddf3d75ff8..99ba1c50e585 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -119,6 +119,12 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
+ /* pf statistics */
+ "pf_rx_packets",
+ "pf_rx_bytes",
+ "pf_tx_packets",
+ "pf_tx_bytes",
+
/* priority flow control statistics rx */
"rx_pause_prio_0", "rx_pause_duration_prio_0",
"rx_pause_transition_prio_0",
@@ -368,6 +374,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->port_stats)[i];
+ for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] =
+ ((unsigned long *)&priv->pf_stats)[i];
+
for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -448,6 +459,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
+ for (i = 0; i < NUM_PF_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 98efb5842fca..77179d7ae4cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1597,6 +1597,9 @@ int mlx4_en_start_port(struct net_device *dev)
}
mdev->mac_removed[priv->port] = 0;
+ priv->counter_index =
+ mlx4_get_default_counter_index(mdev->dev, priv->port);
+
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
@@ -1755,6 +1758,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Set port as not active */
priv->port_up = false;
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promiscuous mode */
if (mdev->dev->caps.steering_mode ==
@@ -1891,6 +1895,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
sizeof(priv->rx_priority_flowstats));
memset(&priv->tx_priority_flowstats, 0,
sizeof(priv->tx_priority_flowstats));
+ memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i]->bytes = 0;
@@ -2287,6 +2292,15 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+}
+
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@@ -2484,6 +2498,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
+ .ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
@@ -2681,7 +2696,7 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
- int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
+ int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
if (!mlx4_is_slave(dev) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
@@ -2743,6 +2758,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
last_i += NUM_PORT_STATS;
+ if (mlx4_is_master(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_PF_STATS);
+ last_i += NUM_PF_STATS;
+
mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
rx_ppp, rx_pause,
tx_ppp, tx_pause);
@@ -2778,6 +2798,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a56f010c846..73f6277d9ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -149,6 +149,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
+ struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
@@ -156,7 +157,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
- int i;
+ int i, counter_index;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -296,6 +297,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_unlock_bh(&priv->stats_lock);
+ memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+ counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+ err = mlx4_get_counter_stats(mdev->dev, counter_index,
+ &tmp_counter_stats, reset);
+
/* 0xffs indicates invalid value */
memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
@@ -314,6 +320,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
+ if (tmp_counter_stats.counter_mode == 0) {
+ priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
+ priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
+ priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
+ priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
+ }
+
for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->rx_priority_flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 34f2fdf4fe5d..e482fa1bb741 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -66,7 +66,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index = priv->counter_index;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7d57777e65c5..4e69cf52a579 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -479,7 +479,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+ dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
@@ -2193,20 +2193,73 @@ err_free_icm:
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int nent;
+ int nent_pow2;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
- nent = dev->caps.max_counters;
- return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+ if (!dev->caps.max_counters)
+ return -ENOSPC;
+
+ nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
+ /* reserve last counter index for sink counter */
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
+ nent_pow2 - 1, 0,
+ nent_pow2 - dev->caps.max_counters + 1);
}
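+/* For example (illustrative, assuming MLX4_SINK_COUNTER_INDEX() names the
+ * last valid counter index): max_counters == 127 gives nent_pow2 == 128,
+ * and 128 - 127 + 1 = 2 top indices are held out of the bitmap, reserving
+ * index 126 as the sink counter.
+ */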
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return;
+
+ if (!dev->caps.max_counters)
+ return;
+
mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
+static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port;
+
+ for (port = 0; port < dev->caps.num_ports; port++)
+ if (priv->def_counter[port] != -1)
+ mlx4_counter_free(dev, priv->def_counter[port]);
+}
+
+static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port, err = 0;
+ u32 idx;
+
+ for (port = 0; port < dev->caps.num_ports; port++)
+ priv->def_counter[port] = -1;
+
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ err = mlx4_counter_alloc(dev, &idx);
+
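+ /* on -ENOSPC the allocator hands back the sink counter index,
+ * which is still usable as the port default
+ */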
+ if (!err || err == -ENOSPC) {
+ priv->def_counter[port] = idx;
+ } else if (err == -ENOENT) {
+ err = 0;
+ continue;
+ } else {
+ mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
+ __func__, port + 1, err);
+ mlx4_cleanup_default_counters(dev);
+ return err;
+ }
+
+ mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
+ __func__, priv->def_counter[port], port + 1);
+ }
+
+ return err;
+}
+
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2215,8 +2268,10 @@ int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
return -ENOENT;
*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
- if (*idx == -1)
- return -ENOMEM;
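+ /* Out of counters: fall back to the sink counter index and
+ * report -ENOSPC so the caller can decide whether that is
+ * acceptable.
+ */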
+ if (*idx == -1) {
+ *idx = MLX4_SINK_COUNTER_INDEX(dev);
+ return -ENOSPC;
+ }
return 0;
}
@@ -2239,8 +2294,35 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
+ u8 counter_index)
+{
+ struct mlx4_cmd_mailbox *if_stat_mailbox;
+ int err;
+ u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
+
+ if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(if_stat_mailbox))
+ return PTR_ERR(if_stat_mailbox);
+
+ err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
+ return err;
+}
+
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return;
+
+ if (idx == MLX4_SINK_COUNTER_INDEX(dev))
+ return;
+
+ __mlx4_clear_if_stat(dev, idx);
+
mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2260,6 +2342,14 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->def_counter[port - 1];
+}
+EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
+
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2395,10 +2485,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_counters_table(dev);
- if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_counters_table(dev);
+ if (err && err != -ENOENT) {
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
+ goto err_qp_table_free;
+ }
+ }
+
+ err = mlx4_allocate_default_counters(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to allocate default counters, aborting\n");
+ goto err_counters_table_free;
}
if (!mlx4_is_slave(dev)) {
@@ -2432,15 +2530,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
- goto err_counters_table_free;
+ goto err_default_counters_free;
}
}
}
return 0;
+err_default_counters_free:
+ mlx4_cleanup_default_counters(dev);
+
err_counters_table_free:
- mlx4_cleanup_counters_table(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -3173,7 +3275,9 @@ err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
- mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_default_counters(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
@@ -3471,7 +3575,9 @@ static void mlx4_unload_one(struct pci_dev *pdev)
mlx4_free_resource_tracker(dev,
RES_TR_FREE_SLAVES_ONLY);
- mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_default_counters(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f424900d23a6..a092c5c34d43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -65,6 +65,8 @@
#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
+#define MLX4_QUERY_IF_STAT_RESET BIT(31)
+
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
@@ -874,6 +876,7 @@ struct mlx4_priv {
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
struct mlx4_bitmap counters_bitmap;
+ int def_counter[MLX4_MAX_PORTS];
struct mlx4_catas_err catas_err;
@@ -1007,6 +1010,8 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index edd8fd69ec9a..d5f9adb6a784 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -566,6 +566,7 @@ struct mlx4_en_priv {
#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_counter_stats pf_stats;
struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_rx rx_flowstats;
@@ -582,6 +583,7 @@ struct mlx4_en_priv {
int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
struct hwtstamp_config hwtstamp_config;
+ u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 00555832a4ae..c5c1de9cf2ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -23,6 +23,14 @@ struct mlx4_en_pkt_stats {
#define NUM_PKT_STATS 43
};
+struct mlx4_en_counter_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+#define NUM_PF_STATS 4
+};
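+/* NUM_PF_STATS must match both the number of fields above and the number
+ * of "pf_*" strings in main_strings[] (en_ethtool.c).
+ */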
+
struct mlx4_en_port_stats {
unsigned long tso_packets;
unsigned long xmit_more;
@@ -71,7 +79,8 @@ struct mlx4_en_flow_stats_tx {
#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_TX + \
- NUM_FLOW_PRIORITY_STATS_RX)
+ NUM_FLOW_PRIORITY_STATS_RX + \
+ NUM_PF_STATS)
struct mlx4_en_stat_out_flow_control_mbox {
/* Total number of PAUSE frames received from the far-end port */
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 2bf437aafc53..bae8b22edbb7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -82,7 +82,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 total_size = 0;
struct mlx4_resource *profile;
- struct mlx4_resource tmp;
struct sysinfo si;
int i, j;
@@ -149,11 +148,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
- if (profile[j].size > profile[j - 1].size) {
- tmp = profile[j];
- profile[j] = profile[j - 1];
- profile[j - 1] = tmp;
- }
+ if (profile[j].size > profile[j - 1].size)
+ swap(profile[j], profile[j - 1]);
}
for (i = 0; i < MLX4_RES_NUM; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b75214a80d0e..20268634a9ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -749,7 +749,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
{
int sort[MLX4_NUM_QP_REGION];
- int i, j, tmp;
+ int i, j;
int last_base = dev->caps.num_qps;
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
@@ -758,11 +758,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
- dev->caps.reserved_qps_cnt[sort[j - 1]]) {
- tmp = sort[j];
- sort[j] = sort[j - 1];
- sort[j - 1] = tmp;
- }
+ dev->caps.reserved_qps_cnt[sort[j - 1]])
+ swap(sort[j], sort[j - 1]);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index ab48386bfefc..731423ca575d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -46,8 +46,11 @@
#include "mlx4.h"
#include "fw.h"
+#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_PF_COUNTERS_PER_PORT 2
+#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
@@ -459,11 +462,21 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
+
+static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
+{
+ /* exclude the reserved sink counter and the PF per-port guarantees */
+ return (dev->caps.max_counters - 1 -
+ (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
+ / MLX4_MAX_PORTS;
+}
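+/* Illustrative arithmetic: with dev->caps.max_counters == 127 and
+ * MLX4_MAX_PORTS == 2, this is (127 - 1 - 2 * 2) / 2 = 61, i.e. the first
+ * 61 VFs can each be guaranteed one counter per port.
+ */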
+
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
+ int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
@@ -499,6 +512,9 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
res_alloc->allocated = kzalloc((dev->persist->
num_vfs + 1) *
sizeof(int), GFP_KERNEL);
+ /* Exclude the reserved sink counter from the free pool */
+ if (i == RES_COUNTER)
+ res_alloc->res_free = dev->caps.max_counters - 1;
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
@@ -577,9 +593,17 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
- res_alloc->guaranteed[t] = 0;
if (t == mlx4_master_func_num(dev))
- res_alloc->res_free = res_alloc->quota[t];
+ res_alloc->guaranteed[t] =
+ MLX4_PF_COUNTERS_PER_PORT *
+ MLX4_MAX_PORTS;
+ else if (t <= max_vfs_guarantee_counter)
+ res_alloc->guaranteed[t] =
+ MLX4_VF_COUNTERS_PER_PORT *
+ MLX4_MAX_PORTS;
+ else
+ res_alloc->guaranteed[t] = 0;
+ res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
@@ -700,6 +724,9 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
}
}
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port);
+
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
@@ -715,6 +742,10 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ err = handle_counter(dev, qpc, slave, port);
+ if (err)
+ goto out;
+
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
@@ -859,6 +890,83 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_unlock_irq(mlx4_tlock(dev));
}
+static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int port);
+
+static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
+ int counter_index)
+{
+ struct res_common *r;
+ struct res_counter *counter;
+ int ret = 0;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return ret;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, counter_index, RES_COUNTER);
+ if (!r || r->owner != slave) {
+ ret = -EINVAL;
+ } else {
+ counter = container_of(r, struct res_counter, com);
+ if (!counter->port)
+ counter->port = port;
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+ return ret;
+}
+
+static int handle_nonexistent_counter(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qpc, u8 slave,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (port == counter->port) {
+ qpc->pri_path.counter_index = counter->com.res_id;
+ spin_unlock_irq(mlx4_tlock(dev));
+ return 0;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ /* No existing counter, need to allocate a new counter */
+ err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
+ port);
+ if (err == -ENOENT) {
+ err = 0;
+ } else if (err && err != -ENOSPC) {
+ mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
+ __func__, slave, err);
+ } else {
+ qpc->pri_path.counter_index = counter_idx;
+ mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
+ __func__, slave, qpc->pri_path.counter_index);
+ err = 0;
+ }
+
+ return err;
+}
+
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port)
+{
+ if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
+ return handle_existing_counter(dev, slave, port,
+ qpc->pri_path.counter_index);
+
+ return handle_nonexistent_counter(dev, qpc, slave, port);
+}
+
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
@@ -952,7 +1060,7 @@ static struct res_common *alloc_srq_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_counter_tr(int id)
+static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
@@ -962,6 +1070,7 @@ static struct res_common *alloc_counter_tr(int id)
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
+ ret->port = port;
return &ret->com;
}
@@ -1022,7 +1131,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
- ret = alloc_counter_tr(id);
+ ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
@@ -1039,6 +1148,53 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
return ret;
}
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ int *counters_arr;
+ int i = 0, err = 0;
+
+ memset(data, 0, sizeof(*data));
+
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return -ENOMEM;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (counter->port == port) {
+ counters_arr[i] = (int)tmp->res_id;
+ i++;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ counters_arr[i] = -1;
+
+ i = 0;
+
+ while (counters_arr[i] != -1) {
+ err = mlx4_get_counter_stats(dev, counters_arr[i], data,
+ 0);
+ if (err) {
+ memset(data, 0, sizeof(*data));
+ goto table_changed;
+ }
+ i++;
+ }
+
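+ /* A failed read (e.g. the counter table changed underneath us)
+ * yields zeroed stats rather than an error.
+ */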
+table_changed:
+ kfree(counters_arr);
+ return 0;
+}
+
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
@@ -2001,7 +2157,7 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
@@ -2019,7 +2175,7 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
- err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
+ err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
if (err) {
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
@@ -2101,7 +2257,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
@@ -2335,6 +2491,9 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return -EINVAL;
index = get_param_l(&in_param);
+ if (index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 0d7aef040fb0..158c88c69ef9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -12,7 +12,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
- depends on MLX5_INFINIBAND=n && NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 87e9e606596a..26a68b8af2c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o transobj.o \
+ mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index cbb3c7cb53f7..e14120eccf04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -35,7 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
-#include "vport.h"
+#include <linux/mlx5/vport.h>
#include "wq.h"
#include "transobj.h"
#include "mlx5_core.h"
@@ -57,7 +57,6 @@
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
-#define MLX5E_PARAMS_MIN_MTU 46
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
@@ -284,6 +283,8 @@ struct mlx5e_sq {
struct netdev_queue *txq;
u32 sqn;
u32 bf_buf_size;
+ u16 max_inline;
+ u16 edge;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
@@ -388,6 +389,7 @@ struct mlx5e_priv {
struct mutex state_lock; /* Protects Interface state */
struct mlx5_uar cq_uar;
u32 pdn;
+ u32 tdn;
struct mlx5_core_mr mr;
struct mlx5e_channel **channel;
@@ -454,6 +456,7 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index de7aec8abca1..388938482ff9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -543,7 +543,7 @@ static int mlx5e_get_settings(struct net_device *netdev,
u32 eth_proto_oper;
int err;
- err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 6feebda4b3e4..120db80c47aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -722,6 +722,8 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
u8 *dmac;
g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
g[0].log_sz = 2;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index eee829d119f9..9a48d8eac0fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -257,25 +257,8 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
spin_unlock_irq(&priv->async_events_spinlock);
}
-static void mlx5e_send_nop(struct mlx5e_sq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
- memset(cseg, 0, sizeof(*cseg));
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-
- sq->skb[pi] = NULL;
- sq->pc++;
- mlx5e_tx_notify_hw(sq, wqe);
-}
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
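+/* e.g. a SW MTU of 1500 corresponds to a HW MTU of 1522 (14-byte Ethernet
+ * header + 4-byte VLAN tag + 4-byte FCS).
+ */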
static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
@@ -305,13 +288,16 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
- priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+ u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
wqe->data.lkey = c->mkey_be;
- wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+ wqe->data.byte_count =
+ cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
}
rq->pdev = c->pdev;
@@ -367,7 +353,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
mlx5_fill_page_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
- err = mlx5_create_rq(mdev, in, inlen, &rq->rqn);
+ err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
kvfree(in);
@@ -395,7 +381,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- err = mlx5_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
kvfree(in);
@@ -408,7 +394,7 @@ static void mlx5e_disable_rq(struct mlx5e_rq *rq)
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
- mlx5_destroy_rq(mdev, rq->rqn);
+ mlx5_core_destroy_rq(mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -447,7 +433,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
goto err_disable_rq;
set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
- mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+ mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -525,7 +511,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->uar_map = sq->uar.map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
- if (mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)))
+ err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ if (err)
goto err_sq_wq_destroy;
sq->txq = netdev_get_tx_queue(priv->netdev,
@@ -535,6 +522,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
return 0;
@@ -596,7 +584,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
mlx5_fill_page_array(&sq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
- err = mlx5_create_sq(mdev, in, inlen, &sq->sqn);
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
kvfree(in);
@@ -624,7 +612,7 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
- err = mlx5_modify_sq(mdev, sq->sqn, in, inlen);
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
kvfree(in);
@@ -637,7 +625,7 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq)
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
- mlx5_destroy_sq(mdev, sq->sqn);
+ mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
@@ -688,7 +676,7 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
/* ensure hw is notified of all pending wqes */
if (mlx5e_sq_has_room_for(sq, 1))
- mlx5e_send_nop(sq);
+ mlx5e_send_nop(sq, true);
mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
while (sq->cc != sq->pc) /* wait till sq is empty */
@@ -1114,13 +1102,14 @@ static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
- return mlx5_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
- mlx5_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_open_tises(struct mlx5e_priv *priv)
@@ -1212,6 +1201,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
#define ROUGH_MAX_L2_L3_HDR_SZ 256
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
@@ -1326,7 +1317,7 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
mlx5e_build_tir_ctx(priv, tirc, tt);
- err = mlx5_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
kvfree(in);
@@ -1335,7 +1326,7 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
- mlx5_destroy_tir(priv->mdev, priv->tirn[tt]);
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_open_tirs(struct mlx5e_priv *priv)
@@ -1366,11 +1357,30 @@ static void mlx5e_close_tirs(struct mlx5e_priv *priv)
mlx5e_close_tir(priv, i);
}
-int mlx5e_open_locked(struct net_device *netdev)
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- int actual_mtu;
+ int hw_mtu;
+ int err;
+
+ err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+ if (err)
+ return err;
+
+ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+ if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+ netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+ __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+ netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ return 0;
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
int num_txqs;
int err;
@@ -1379,25 +1389,9 @@ int mlx5e_open_locked(struct net_device *netdev)
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
- err = mlx5_set_port_mtu(mdev, netdev->mtu);
- if (err) {
- netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
- __func__, err);
- return err;
- }
-
- err = mlx5_query_port_oper_mtu(mdev, &actual_mtu);
- if (err) {
- netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
- __func__, err);
+ err = mlx5e_set_dev_port_mtu(netdev);
+ if (err)
return err;
- }
-
- if (actual_mtu != netdev->mtu)
- netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
- __func__, netdev->mtu);
-
- netdev->mtu = actual_mtu;
err = mlx5e_open_tises(priv);
if (err) {
@@ -1612,15 +1606,14 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int max_mtu;
- int err = 0;
+ int err;
- err = mlx5_query_port_max_mtu(mdev, &max_mtu);
- if (err)
- return err;
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
- netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
- __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+ if (new_mtu > max_mtu) {
+ netdev_err(netdev,
+ "%s: Bad MTU (%d) > (%d) Max\n",
+ __func__, new_mtu, max_mtu);
return -EINVAL;
}
@@ -1654,7 +1647,10 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
!MLX5_CAP_ETH(mdev, csum_cap) ||
!MLX5_CAP_ETH(mdev, max_lso_cap) ||
!MLX5_CAP_ETH(mdev, vlan_cap) ||
- !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+ !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.max_ft_level)
+ < 3) {
mlx5_core_warn(mdev,
"Not creating net device, some required device capabilities are missing\n");
return -ENOTSUPP;
@@ -1715,7 +1711,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr);
+ mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}
static void mlx5e_build_netdev(struct net_device *netdev)
@@ -1735,6 +1731,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->ethtool_ops = &mlx5e_ethtool_ops;
+ netdev->vlan_features |= NETIF_F_SG;
netdev->vlan_features |= NETIF_F_IP_CSUM;
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_GRO;
@@ -1747,7 +1744,6 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1826,11 +1822,18 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_unmap_free_uar;
}
+ err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
__func__, err);
- goto err_dealloc_pd;
+ goto err_dealloc_transport_domain;
}
err = register_netdev(netdev);
@@ -1847,6 +1850,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mr);
+err_dealloc_transport_domain:
+ mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
err_dealloc_pd:
mlx5_core_dealloc_pd(mdev, priv->pdn);
@@ -1866,6 +1872,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
unregister_netdev(netdev);
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
mlx5e_disable_async_events(priv);
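
The mlx5e_set_dev_port_mtu() helper factored out above hides a unit conversion: the port works in hardware MTU (payload plus L2 overhead) while the netdev works in software MTU, and the netdev then adopts whatever operational value the port actually programmed. A minimal standalone sketch of that round-trip; the 18-byte headroom and the 9018 cap are assumptions for illustration (the driver's real values come from the MLX5E_SW2HW_MTU()/MLX5E_HW2SW_MTU() macros in en.h):

#include <stdio.h>

/* Assumed L2 headroom; see MLX5E_SW2HW_MTU()/MLX5E_HW2SW_MTU() in en.h. */
#define HW_HEADROOM	18			/* ETH_HLEN + ETH_FCS_LEN (assumed) */
#define SW2HW_MTU(mtu)	((mtu) + HW_HEADROOM)
#define HW2SW_MTU(mtu)	((mtu) - HW_HEADROOM)

static int set_dev_port_mtu(int req_sw_mtu)
{
	int hw_mtu = SW2HW_MTU(req_sw_mtu);	/* what we ask the port for */

	/* the port may clamp or round; pretend it capped us at 9018 */
	if (hw_mtu > 9018)
		hw_mtu = 9018;

	if (HW2SW_MTU(hw_mtu) != req_sw_mtu)
		fprintf(stderr, "port mtu %d differs from requested %d\n",
			HW2SW_MTU(hw_mtu), req_sw_mtu);

	return HW2SW_MTU(hw_mtu);	/* netdev->mtu adopts the operational value */
}

int main(void)
{
	printf("mtu=%d\n", set_dev_port_mtu(9600));
	return 0;
}
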
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ce1317cdabd7..06e7c744ed4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -45,18 +45,18 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
if (unlikely(!skb))
return -ENOMEM;
- skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
dma_addr = dma_map_single(rq->pdev,
/* hw start padding */
- skb->data - MLX5E_NET_IP_ALIGN,
- /* hw end padding */
+ skb->data,
+ /* hw end padding */
rq->wqe_sz,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
goto err_free_skb;
+ skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
*((dma_addr_t *)skb->cb) = dma_addr;
wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
@@ -217,7 +217,7 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
dma_unmap_single(rq->pdev,
*((dma_addr_t *)skb->cb),
- skb_end_offset(skb),
+ rq->wqe_sz,
DMA_FROM_DEVICE);
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
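
The en_rx.c hunk above reorders skb_reserve() against dma_map_single(): the buffer is now mapped for the full rq->wqe_sz starting at skb->data, and the NET_IP_ALIGN headroom is reserved only afterwards, so the completion path can unmap with exactly the rq->wqe_sz length that was mapped. A minimal sketch of the corrected pattern (the function name is made up; the mlx5e types are assumed from en.h):

static int mlx5e_rx_map_wqe(struct mlx5e_rq *rq, struct sk_buff *skb,
			    struct mlx5e_rx_wqe *wqe)
{
	dma_addr_t dma_addr;

	/* map first, from skb->data, for exactly rq->wqe_sz bytes ... */
	dma_addr = dma_map_single(rq->pdev, skb->data, rq->wqe_sz,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
		return -ENOMEM;

	/* ... then shift skb->data; the mapping length is unaffected */
	skb_reserve(skb, MLX5E_NET_IP_ALIGN);

	*((dma_addr_t *)skb->cb) = dma_addr;
	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
	return 0;
}
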
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8020986cdaf6..bac268a670f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -34,6 +34,33 @@
#include <linux/if_vlan.h>
#include "en.h"
+#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+ sq->skb[pi] = NULL;
+ sq->pc++;
+
+ if (notify_hw) {
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, wqe);
+ }
+}
+
static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
u32 *size)
{
@@ -89,21 +116,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return MLX5E_MIN_INLINE;
}
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
-{
- struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
- int cpy1_sz = 2 * ETH_ALEN;
- int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
-
- skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
- skb_pull_inline(skb, cpy1_sz);
- vhdr->h_vlan_proto = skb->vlan_proto;
- vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
- skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
- cpy2_sz);
- skb_pull_inline(skb, cpy2_sz);
-}
-
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -149,12 +161,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
ETH_ZLEN);
}
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
- } else {
- skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
- skb_pull_inline(skb, ihs);
- }
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
eseg->inline_hdr_sz = cpu_to_be16(ihs);
@@ -215,7 +223,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
- if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
netif_tx_stop_queue(sq->txq);
sq->stats.stopped++;
}
@@ -223,6 +231,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
mlx5e_tx_notify_hw(sq, wqe);
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((sq->pc & wq->sz_m1) > sq->edge)
+ mlx5e_send_nop(sq, false);
+
sq->stats.packets++;
return NETDEV_TX_OK;
@@ -330,7 +342,7 @@ free_skb:
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
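
Two related pieces land in en_tx.c: mlx5e_send_nop() gains a notify_hw flag, and the xmit path pads the tail of the ring with un-notified NOPs whenever the producer counter passes sq->edge, so a worst-case multi-WQEBB WQE never wraps the cyclic work queue. A standalone toy model of the edge arithmetic; the ring size and WQEBB count are made-up stand-ins:

#include <stdio.h>

#define RING_SZ		16	/* assumed ring size (power of 2) */
#define MAX_WQEBBS	4	/* stands in for MLX5_SEND_WQE_MAX_WQEBBS */
#define SZ_M1		(RING_SZ - 1)
#define EDGE		(RING_SZ - MAX_WQEBBS)

static unsigned int pc;		/* producer counter, free running */

static void post_nop(void)	{ pc += 1; }
static void post_wqe(int bbs)	{ pc += bbs; }

static void xmit(int bbs)
{
	post_wqe(bbs);
	/* pad the edge with nops so the next big WQE starts contiguous */
	while ((pc & SZ_M1) > EDGE)
		post_nop();
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		xmit(3);
		printf("pc=%u slot=%u\n", pc, pc & SZ_M1);
	}
	return 0;
}
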
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 801ccadd709a..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,34 +35,64 @@
#include <linux/module.h>
#include "mlx5_core.h"
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+ int outlen)
{
- struct mlx5_cmd_query_adapter_mbox_out *out;
- struct mlx5_cmd_query_adapter_mbox_in in;
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- memset(&in, 0, sizeof(in));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ err = mlx5_cmd_query_adapter(dev, out, outlen);
if (err)
- goto out_out;
+ goto out;
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out_out;
- }
+ memcpy(dev->board_id,
+ MLX5_ADDR_OF(query_adapter_out, out,
+ query_adapter_struct.vsd_contd_psid),
+ MLX5_FLD_SZ_BYTES(query_adapter_out,
+ query_adapter_struct.vsd_contd_psid));
- memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
-
-out_out:
+out:
kfree(out);
+ return err;
+}
+
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+ err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ if (err)
+ goto out;
+
+ *vendor_id = MLX5_GET(query_adapter_out, out,
+ query_adapter_struct.ieee_vendor_id);
+out:
+ kfree(out);
return err;
}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
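
The rewritten fw.c code illustrates the MLX5_ST_SZ_*/MLX5_SET/MLX5_GET accessor idiom this series moves the driver onto: size the mailbox from the structure layout, set the opcode by field name, execute, then pull fields out of the response by name. A hedged sketch of how a consumer might call the newly exported mlx5_core_query_vendor_id() (the helper name and probe context are assumptions):

static void report_vendor_id(struct mlx5_core_dev *mdev)
{
	u32 vendor_id;

	/* returns 0 on success; the out-param is the IEEE OUI */
	if (!mlx5_core_query_vendor_id(mdev, &vendor_id))
		mlx5_core_dbg(mdev, "IEEE vendor id: 0x%x\n", vendor_id);
}
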
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2510fed3494d..afad529838de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -284,14 +284,6 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-static u16 to_sw_pkey_sz(int pkey_sz)
-{
- if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
- return 0;
-
- return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
-}
-
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
{
@@ -386,7 +378,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_ST_SZ_BYTES(cmd_hca_cap));
mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
- to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
@@ -654,7 +646,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
dev->issi = 1;
return 0;
- } else if (sup_issi & (1 << 0)) {
+ } else if (sup_issi & (1 << 0) || !sup_issi) {
return 0;
}
@@ -776,9 +768,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_poll;
}
- err = mlx5_cmd_query_adapter(dev);
+ err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query adapter failed\n");
+ dev_err(&pdev->dev, "query board id failed\n");
goto err_stop_poll;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6983c1047255..fc88ecaecb4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -78,7 +78,7 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
}
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7d3d0f9f328d..70147999f657 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -104,13 +104,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask)
+ int ptys_size, int proto_mask, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
int err;
memset(in, 0, sizeof(in));
- MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
@@ -126,7 +126,7 @@ int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
- err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
@@ -145,7 +145,7 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
- err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
@@ -158,6 +158,42 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+ if (err)
+ return err;
+
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ else
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask)
{
@@ -212,21 +248,18 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
return err;
}
-static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
- int *admin_mtu, int *max_mtu, int *oper_mtu)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ int *max_mtu, int *oper_mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- int err;
memset(in, 0, sizeof(in));
- MLX5_SET(pmtu_reg, in, local_port, 1);
+ MLX5_SET(pmtu_reg, in, local_port, port);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PMTU, 0, 0);
- if (err)
- return err;
+ mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
if (max_mtu)
*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
@@ -234,11 +267,9 @@ static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
*oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
if (admin_mtu)
*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
-
- return 0;
}
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -246,21 +277,54 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
memset(in, 0, sizeof(in));
MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
- MLX5_SET(pmtu_reg, in, local_port, 1);
+ MLX5_SET(pmtu_reg, in, local_port, port);
- return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
- MLX5_REG_PMTU, 0, 1);
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
-int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu)
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+ u8 port)
{
- return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL);
+ mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
-int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port)
{
- return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
+ mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ int pvlc_size, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+ return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+ if (err)
+ return err;
+
+ *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
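
port.c now threads a local_port argument through the PTYS/PMTU queries and adds the PVLC-based helpers, which the mlx5_ib side needs in order to query either port of a dual-port HCA. A hedged usage sketch of the new exports (the function name, the chosen port and the debug sink are assumptions):

static void dump_port_oper(struct mlx5_core_dev *dev, u8 port)
{
	u8 width, proto, vls;

	if (!mlx5_query_port_link_width_oper(dev, &width, port))
		mlx5_core_dbg(dev, "port %d link width oper 0x%x\n", port, width);
	if (!mlx5_query_port_proto_oper(dev, &proto, MLX5_PTYS_IB, port))
		mlx5_core_dbg(dev, "port %d ib proto oper 0x%x\n", port, proto);
	if (!mlx5_query_port_vl_hw_cap(dev, &vls, port))
		mlx5_core_dbg(dev, "port %d vl hw cap %d\n", port, vls);
}
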
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index dc7dbf7e9d98..8b494b562263 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
+ void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ if (dev->issi) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(qpc, qpc, user_index, 0xffffff);
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f9d25dcd03c1..c48f504ccbeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
+#include "transobj.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
@@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
+static int get_pas_size(void *srqc)
+{
+ u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+ u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
+ u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+ u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+
+ return rq_num_pas * sizeof(u64);
+}
+
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+ void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ if (srqc_to_rmpc) {
+ switch (MLX5_GET(srqc, srqc, state)) {
+ case MLX5_SRQC_STATE_GOOD:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ break;
+ case MLX5_SRQC_STATE_ERROR:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+ __LINE__, MLX5_GET(srqc, srqc, state));
+ MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+ }
+
+ MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
+ MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
+ MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
+ MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
+ MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
+ } else {
+ switch (MLX5_GET(rmpc, rmpc, state)) {
+ case MLX5_RMPC_STATE_RDY:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+ break;
+ case MLX5_RMPC_STATE_ERR:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+ __func__, __LINE__,
+ MLX5_GET(rmpc, rmpc, state));
+ MLX5_SET(srqc, srqc, state,
+ MLX5_GET(rmpc, rmpc, state));
+ }
+
+ MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
+ MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
+ MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
+ MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
+ MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
+ MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
+ MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
+ MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
+ }
+}
+
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_destroy_srq_mbox_in din;
- struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
+
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+ sizeof(out));
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ struct mlx5_destroy_srq_mbox_in in;
+ struct mlx5_destroy_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ struct mlx5_arm_srq_mbox_in in;
+ struct mlx5_arm_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+ in.hdr.opmod = cpu_to_be16(!!is_srq);
+ in.srqn = cpu_to_be32(srq->srqn);
+ in.lwm = cpu_to_be16(lwm);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+ sizeof(in), (u32 *)(&out),
+ sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ struct mlx5_query_srq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int srq_inlen)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *srqc;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ memcpy(pas, in->pas, pas_size);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq, u16 lwm)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 *xrcsrq_out;
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!xrcsrq_out)
+ return -ENOMEM;
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+ void *create_in;
+ void *rmpc;
+ void *srqc;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+ memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ rmpc_srqc_reformat(srqc, rmpc, true);
+
+ err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *rmp_out;
+ void *rmpc;
+ void *srqc;
+ int err;
+
+ rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ if (!rmp_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+ kvfree(rmp_out);
+ return err;
+}
+
+static int create_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int inlen, int is_xrc)
+{
+ if (!dev->issi)
+ return create_srq_cmd(dev, srq, in, inlen);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return create_xrc_srq_cmd(dev, srq, in, inlen);
+ else
+ return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ if (!dev->issi)
+ return destroy_srq_cmd(dev, srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return destroy_xrc_srq_cmd(dev, srq);
+ else
+ return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ int err;
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+ srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+ err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ if (err)
+ return err;
+
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
- goto err_cmd;
+ goto err_destroy_srq_split;
}
return 0;
-err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.srqn = cpu_to_be32(srq->srqn);
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
@@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = destroy_srq_split(dev, srq);
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
- struct mlx5_query_srq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ if (!dev->issi)
+ return query_srq_cmd(dev, srq, out);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return query_xrc_srq_cmd(dev, srq, out);
+ else
+ return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ if (!dev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ else
+ return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
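
The srq.c rework funnels every verb through a small dispatch: legacy CREATE/DESTROY/ARM/QUERY_SRQ commands when dev->issi is 0, XRC SRQ commands for MLX5_RES_XSRQ resources, and RMP commands otherwise. A standalone toy of that split (the types and names here are illustrative, not the driver's):

#include <stdio.h>

enum res { RES_SRQ, RES_XSRQ };

struct dev { int issi; };
struct srq { enum res res; };

static int create_legacy(void)	{ puts("CREATE_SRQ");     return 0; }
static int create_xrc(void)	{ puts("CREATE_XRC_SRQ"); return 0; }
static int create_rmp(void)	{ puts("CREATE_RMP");     return 0; }

static int create_split(struct dev *d, struct srq *s, int is_xrc)
{
	s->res = is_xrc ? RES_XSRQ : RES_SRQ;

	if (!d->issi)
		return create_legacy();	/* pre-ISSI firmware interface */
	else if (s->res == RES_XSRQ)
		return create_xrc();
	else
		return create_rmp();	/* plain SRQs become RMPs */
}

int main(void)
{
	struct dev d = { .issi = 1 };
	struct srq s;

	return create_split(&d, &s, 0);
}
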
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 3c555d708af1..8d98b03026d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -34,7 +34,42 @@
#include "mlx5_core.h"
#include "transobj.h"
-int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
u32 out[MLX5_ST_SZ_DW(create_rq_out)];
int err;
@@ -49,7 +84,7 @@ int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
return err;
}
-int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
@@ -60,7 +95,7 @@ int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
-void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
@@ -73,7 +108,7 @@ void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
u32 out[MLX5_ST_SZ_DW(create_sq_out)];
int err;
@@ -88,7 +123,7 @@ int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
return err;
}
-int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
@@ -99,7 +134,7 @@ int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
-void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
@@ -112,7 +147,8 @@ void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn)
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)];
int err;
@@ -127,7 +163,7 @@ int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn)
return err;
}
-void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -140,7 +176,8 @@ void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn)
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn)
{
u32 out[MLX5_ST_SZ_DW(create_tis_out)];
int err;
@@ -155,7 +192,7 @@ int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn)
return err;
}
-void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tis_out)];
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
@@ -167,3 +204,157 @@ void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ int err;
+
+ MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+ return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ int err;
+
+ MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+ return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!err) {
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ }
+
+ return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
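
transobj.c gains the transport-domain verbs alongside the mlx5_core_* renames. The Ethernet driver's intended lifecycle, visible in the en_main.c hunks earlier in this patch, is: allocate one tdn per device at create time, stamp it into every TIS and TIR context, and release it last on teardown. A minimal sketch under those assumptions (the helper names are made up):

static int init_transport(struct mlx5_core_dev *mdev, u32 *tdn)
{
	int err = mlx5_alloc_transport_domain(mdev, tdn);

	if (err)
		return err;
	/* ... MLX5_SET(tisc, tisc, transport_domain, *tdn) on each TIS,
	 *     MLX5_SET(tirc, tirc, transport_domain, *tdn) on each TIR ... */
	return 0;
}

static void fini_transport(struct mlx5_core_dev *mdev, u32 tdn)
{
	mlx5_dealloc_transport_domain(mdev, tdn);
}
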
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index 1bc898cc4933..f9ef244710d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -33,15 +33,32 @@
#ifndef __TRANSOBJ_H__
#define __TRANSOBJ_H__
-int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn);
-int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
-void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
-int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn);
-int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
-void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
-int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn);
-void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
-int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn);
-void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ba374b9a6c87..b94177ebcf3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -33,7 +33,7 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
-#include "vport.h"
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
@@ -55,8 +55,9 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
return MLX5_GET(query_vport_state_out, out, state);
}
+EXPORT_SYMBOL(mlx5_query_vport_state);
-void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
u32 *out;
@@ -82,3 +83,263 @@ void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
kvfree(out);
}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ union ib_gid *tmp;
+ int tbsz;
+ int nout;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+ mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+ vf_num, gid_index, tbsz);
+
+ if (gid_index > tbsz && gid_index != 0xffff)
+ return -EINVAL;
+
+ if (gid_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * sizeof(*gid);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ gid->global.subnet_prefix = tmp->global.subnet_prefix;
+ gid->global.interface_id = tmp->global.interface_id;
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ void *pkarr;
+ int nout;
+ int tbsz;
+ int err;
+ int i;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+ if (pkey_index > tbsz && pkey_index != 0xffff)
+ return -EINVAL;
+
+ if (pkey_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+ for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+ *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int is_group_manager;
+ void *out;
+ void *ctx;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto ex;
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto ex;
+
+ ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+ rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+ rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+ rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+ rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+ rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+ rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+ rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+ rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+ rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+ rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask1_field_select);
+ rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+ rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask2_field_select);
+ rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+ rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+ init_type_reply);
+ rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+ rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+ subnet_timeout);
+ rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+ rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+ rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ qkey_violation_counter);
+ rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ pkey_violation_counter);
+ rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+ rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+ system_image_guid);
+
+ex:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *sys_image_guid = rep->sys_image_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *node_guid = rep->node_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
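
vport.c grows the HCA-vport query helpers used by the IB driver; the two GUID wrappers show the calling convention: other_vport=0 and vf_num=0 query the caller's own vport on port 1. A hedged sketch of a consumer (the function name and debug sink are assumptions):

static void dump_guids(struct mlx5_core_dev *dev)
{
	u64 sys_guid, node_guid;

	if (!mlx5_query_hca_vport_system_image_guid(dev, &sys_guid))
		mlx5_core_dbg(dev, "sys image guid 0x%llx\n", sys_guid);
	if (!mlx5_query_hca_vport_node_guid(dev, &node_guid))
		mlx5_core_dbg(dev, "node guid 0x%llx\n", node_guid);
}
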
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.h b/drivers/net/ethernet/mellanox/mlx5/core/vport.h
deleted file mode 100644
index c05ca2c3419d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_VPORT_H__
-#define __MLX5_VPORT_H__
-
-#include <linux/mlx5/driver.h>
-
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
-void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
-
-#endif /* __MLX5_VPORT_H__ */
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 48d2aecb0da9..75dc46c5fca2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr)
/* This is used to verify Wake-on-LAN is working. */
if (hw_priv->pme_wait) {
- if (hw_priv->pme_wait <= jiffies) {
+ if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
hw_clr_wol_pme_status(&hw_priv->hw);
hw_priv->pme_wait = 0;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 9e1aaa7f36bb..5f630a24e491 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1004,8 +1004,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
- void **tmp_arr;
-
if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_ptr];
@@ -1020,10 +1018,7 @@ _alloc_after_swap:
* i.e. no additional lock need to be done when we free a resource */
if (channel->length - channel->free_ptr > 0) {
-
- tmp_arr = channel->reserve_arr;
- channel->reserve_arr = channel->free_arr;
- channel->free_arr = tmp_arr;
+ swap(channel->reserve_arr, channel->free_arr);
channel->reserve_ptr = channel->length;
channel->reserve_top = channel->free_ptr;
channel->free_ptr = channel->length;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 196e98a2d93b..8e8031a1c6c7 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -2,6 +2,21 @@
# Renesas device configuration
#
+config NET_VENDOR_RENESAS
+ bool "Renesas devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Renesas devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_RENESAS
+
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
@@ -15,3 +30,19 @@ config SH_ETH
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
R8A7740, R8A777x and R8A779x.
+
+config RAVB
+ tristate "Renesas Ethernet AVB support"
+ depends on HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select CRC32
+ select MII
+ select MDIO_BITBANG
+ select PHYLIB
+ select PTP_1588_CLOCK
+ help
+ Renesas Ethernet AVB device driver.
+ This driver supports the following SoCs:
+ - R8A779x.
+
+endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 1c278a8e066a..a05102a7df02 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -3,3 +3,7 @@
#
obj-$(CONFIG_SH_ETH) += sh_eth.o
+
+ravb-objs := ravb_main.o ravb_ptp.o
+
+obj-$(CONFIG_RAVB) += ravb.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
new file mode 100644
index 000000000000..8aa50ac4e2d6
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -0,0 +1,832 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RAVB_H__
+#define __RAVB_H__
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
+#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
+#define NC_TX_RING_SIZE 64 /* TX ring size for Network Control */
+#define NC_RX_RING_SIZE 64 /* RX ring size for Network Control */
+#define BE_TX_RING_MIN 64
+#define BE_RX_RING_MIN 64
+#define BE_TX_RING_MAX 1024
+#define BE_RX_RING_MAX 2048
+
+#define PKT_BUF_SZ 1538
+
+/* Driver's parameters */
+#define RAVB_ALIGN 128
+
+/* Hardware time stamp */
+#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */
+#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */
+
+#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */
+#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */
+#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
+#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006
+#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */
+
+enum ravb_reg {
+ /* AVB-DMAC registers */
+ CCC = 0x0000,
+ DBAT = 0x0004,
+ DLR = 0x0008,
+ CSR = 0x000C,
+ CDAR0 = 0x0010,
+ CDAR1 = 0x0014,
+ CDAR2 = 0x0018,
+ CDAR3 = 0x001C,
+ CDAR4 = 0x0020,
+ CDAR5 = 0x0024,
+ CDAR6 = 0x0028,
+ CDAR7 = 0x002C,
+ CDAR8 = 0x0030,
+ CDAR9 = 0x0034,
+ CDAR10 = 0x0038,
+ CDAR11 = 0x003C,
+ CDAR12 = 0x0040,
+ CDAR13 = 0x0044,
+ CDAR14 = 0x0048,
+ CDAR15 = 0x004C,
+ CDAR16 = 0x0050,
+ CDAR17 = 0x0054,
+ CDAR18 = 0x0058,
+ CDAR19 = 0x005C,
+ CDAR20 = 0x0060,
+ CDAR21 = 0x0064,
+ ESR = 0x0088,
+ RCR = 0x0090,
+ RQC0 = 0x0094,
+ RQC1 = 0x0098,
+ RQC2 = 0x009C,
+ RQC3 = 0x00A0,
+ RQC4 = 0x00A4,
+ RPC = 0x00B0,
+ UFCW = 0x00BC,
+ UFCS = 0x00C0,
+ UFCV0 = 0x00C4,
+ UFCV1 = 0x00C8,
+ UFCV2 = 0x00CC,
+ UFCV3 = 0x00D0,
+ UFCV4 = 0x00D4,
+ UFCD0 = 0x00E0,
+ UFCD1 = 0x00E4,
+ UFCD2 = 0x00E8,
+ UFCD3 = 0x00EC,
+ UFCD4 = 0x00F0,
+ SFO = 0x00FC,
+ SFP0 = 0x0100,
+ SFP1 = 0x0104,
+ SFP2 = 0x0108,
+ SFP3 = 0x010C,
+ SFP4 = 0x0110,
+ SFP5 = 0x0114,
+ SFP6 = 0x0118,
+ SFP7 = 0x011C,
+ SFP8 = 0x0120,
+ SFP9 = 0x0124,
+ SFP10 = 0x0128,
+ SFP11 = 0x012C,
+ SFP12 = 0x0130,
+ SFP13 = 0x0134,
+ SFP14 = 0x0138,
+ SFP15 = 0x013C,
+ SFP16 = 0x0140,
+ SFP17 = 0x0144,
+ SFP18 = 0x0148,
+ SFP19 = 0x014C,
+ SFP20 = 0x0150,
+ SFP21 = 0x0154,
+ SFP22 = 0x0158,
+ SFP23 = 0x015C,
+ SFP24 = 0x0160,
+ SFP25 = 0x0164,
+ SFP26 = 0x0168,
+ SFP27 = 0x016C,
+ SFP28 = 0x0170,
+ SFP29 = 0x0174,
+ SFP30 = 0x0178,
+ SFP31 = 0x017C,
+ SFM0 = 0x01C0,
+ SFM1 = 0x01C4,
+ TGC = 0x0300,
+ TCCR = 0x0304,
+ TSR = 0x0308,
+ TFA0 = 0x0310,
+ TFA1 = 0x0314,
+ TFA2 = 0x0318,
+ CIVR0 = 0x0320,
+ CIVR1 = 0x0324,
+ CDVR0 = 0x0328,
+ CDVR1 = 0x032C,
+ CUL0 = 0x0330,
+ CUL1 = 0x0334,
+ CLL0 = 0x0338,
+ CLL1 = 0x033C,
+ DIC = 0x0350,
+ DIS = 0x0354,
+ EIC = 0x0358,
+ EIS = 0x035C,
+ RIC0 = 0x0360,
+ RIS0 = 0x0364,
+ RIC1 = 0x0368,
+ RIS1 = 0x036C,
+ RIC2 = 0x0370,
+ RIS2 = 0x0374,
+ TIC = 0x0378,
+ TIS = 0x037C,
+ ISS = 0x0380,
+ GCCR = 0x0390,
+ GMTT = 0x0394,
+ GPTC = 0x0398,
+ GTI = 0x039C,
+ GTO0 = 0x03A0,
+ GTO1 = 0x03A4,
+ GTO2 = 0x03A8,
+ GIC = 0x03AC,
+ GIS = 0x03B0,
+ GCPT = 0x03B4, /* Undocumented? */
+ GCT0 = 0x03B8,
+ GCT1 = 0x03BC,
+ GCT2 = 0x03C0,
+
+ /* E-MAC registers */
+ ECMR = 0x0500,
+ RFLR = 0x0508,
+ ECSR = 0x0510,
+ ECSIPR = 0x0518,
+ PIR = 0x0520,
+ PSR = 0x0528,
+ PIPR = 0x052c,
+ MPR = 0x0558,
+ PFTCR = 0x055c,
+ PFRCR = 0x0560,
+ GECMR = 0x05b0,
+ MAHR = 0x05c0,
+ MALR = 0x05c8,
+ TROCR = 0x0700, /* Undocumented? */
+ CDCR = 0x0708, /* Undocumented? */
+ LCCR = 0x0710, /* Undocumented? */
+ CEFCR = 0x0740,
+ FRECR = 0x0748,
+ TSFRCR = 0x0750,
+ TLFRCR = 0x0758,
+ RFCR = 0x0760,
+ CERCR = 0x0768, /* Undocumented? */
+ CEECR = 0x0770, /* Undocumented? */
+ MAFCR = 0x0778,
+};
+
+/* Register bits of the Ethernet AVB */
+/* CCC */
+enum CCC_BIT {
+ CCC_OPC = 0x00000003,
+ CCC_OPC_RESET = 0x00000000,
+ CCC_OPC_CONFIG = 0x00000001,
+ CCC_OPC_OPERATION = 0x00000002,
+ CCC_DTSR = 0x00000100,
+ CCC_CSEL = 0x00030000,
+ CCC_CSEL_HPB = 0x00010000,
+ CCC_CSEL_ETH_TX = 0x00020000,
+ CCC_CSEL_GMII_REF = 0x00030000,
+ CCC_BOC = 0x00100000, /* Undocumented? */
+ CCC_LBME = 0x01000000,
+};
+
+/* CSR */
+enum CSR_BIT {
+ CSR_OPS = 0x0000000F,
+ CSR_OPS_RESET = 0x00000001,
+ CSR_OPS_CONFIG = 0x00000002,
+ CSR_OPS_OPERATION = 0x00000004,
+ CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_DTS = 0x00000100,
+ CSR_TPO0 = 0x00010000,
+ CSR_TPO1 = 0x00020000,
+ CSR_TPO2 = 0x00040000,
+ CSR_TPO3 = 0x00080000,
+ CSR_RPO = 0x00100000,
+};
+
+/* ESR */
+enum ESR_BIT {
+ ESR_EQN = 0x0000001F,
+ ESR_ET = 0x00000F00,
+ ESR_EIL = 0x00001000,
+};
+
+/* RCR */
+enum RCR_BIT {
+ RCR_EFFS = 0x00000001,
+ RCR_ENCF = 0x00000002,
+ RCR_ESF = 0x0000000C,
+ RCR_ETS0 = 0x00000010,
+ RCR_ETS2 = 0x00000020,
+ RCR_RFCL = 0x1FFF0000,
+};
+
+/* RQC0/1/2/3/4 */
+enum RQC_BIT {
+ RQC_RSM0 = 0x00000003,
+ RQC_UFCC0 = 0x00000030,
+ RQC_RSM1 = 0x00000300,
+ RQC_UFCC1 = 0x00003000,
+ RQC_RSM2 = 0x00030000,
+ RQC_UFCC2 = 0x00300000,
+ RQC_RSM3 = 0x03000000,
+ RQC_UFCC3 = 0x30000000,
+};
+
+/* RPC */
+enum RPC_BIT {
+ RPC_PCNT = 0x00000700,
+ RPC_DCNT = 0x00FF0000,
+};
+
+/* UFCW */
+enum UFCW_BIT {
+ UFCW_WL0 = 0x0000003F,
+ UFCW_WL1 = 0x00003F00,
+ UFCW_WL2 = 0x003F0000,
+ UFCW_WL3 = 0x3F000000,
+};
+
+/* UFCS */
+enum UFCS_BIT {
+ UFCS_SL0 = 0x0000003F,
+ UFCS_SL1 = 0x00003F00,
+ UFCS_SL2 = 0x003F0000,
+ UFCS_SL3 = 0x3F000000,
+};
+
+/* UFCV0/1/2/3/4 */
+enum UFCV_BIT {
+ UFCV_CV0 = 0x0000003F,
+ UFCV_CV1 = 0x00003F00,
+ UFCV_CV2 = 0x003F0000,
+ UFCV_CV3 = 0x3F000000,
+};
+
+/* UFCD0/1/2/3/4 */
+enum UFCD_BIT {
+ UFCD_DV0 = 0x0000003F,
+ UFCD_DV1 = 0x00003F00,
+ UFCD_DV2 = 0x003F0000,
+ UFCD_DV3 = 0x3F000000,
+};
+
+/* SFO */
+enum SFO_BIT {
+ SFO_FPB = 0x0000003F,
+};
+
+/* RTC */
+enum RTC_BIT {
+ RTC_MFL0 = 0x00000FFF,
+ RTC_MFL1 = 0x0FFF0000,
+};
+
+/* TGC */
+enum TGC_BIT {
+ TGC_TSM0 = 0x00000001,
+ TGC_TSM1 = 0x00000002,
+ TGC_TSM2 = 0x00000004,
+ TGC_TSM3 = 0x00000008,
+ TGC_TQP = 0x00000030,
+ TGC_TQP_NONAVB = 0x00000000,
+ TGC_TQP_AVBMODE1 = 0x00000010,
+ TGC_TQP_AVBMODE2 = 0x00000030,
+ TGC_TBD0 = 0x00000300,
+ TGC_TBD1 = 0x00003000,
+ TGC_TBD2 = 0x00030000,
+ TGC_TBD3 = 0x00300000,
+};
+
+/* TCCR */
+enum TCCR_BIT {
+ TCCR_TSRQ0 = 0x00000001,
+ TCCR_TSRQ1 = 0x00000002,
+ TCCR_TSRQ2 = 0x00000004,
+ TCCR_TSRQ3 = 0x00000008,
+ TCCR_TFEN = 0x00000100,
+ TCCR_TFR = 0x00000200,
+};
+
+/* TSR */
+enum TSR_BIT {
+ TSR_CCS0 = 0x00000003,
+ TSR_CCS1 = 0x0000000C,
+ TSR_TFFL = 0x00000700,
+};
+
+/* TFA2 */
+enum TFA2_BIT {
+ TFA2_TSV = 0x0000FFFF,
+ TFA2_TST = 0x03FF0000,
+};
+
+/* DIC */
+enum DIC_BIT {
+ DIC_DPE1 = 0x00000002,
+ DIC_DPE2 = 0x00000004,
+ DIC_DPE3 = 0x00000008,
+ DIC_DPE4 = 0x00000010,
+ DIC_DPE5 = 0x00000020,
+ DIC_DPE6 = 0x00000040,
+ DIC_DPE7 = 0x00000080,
+ DIC_DPE8 = 0x00000100,
+ DIC_DPE9 = 0x00000200,
+ DIC_DPE10 = 0x00000400,
+ DIC_DPE11 = 0x00000800,
+ DIC_DPE12 = 0x00001000,
+ DIC_DPE13 = 0x00002000,
+ DIC_DPE14 = 0x00004000,
+ DIC_DPE15 = 0x00008000,
+};
+
+/* DIS */
+enum DIS_BIT {
+ DIS_DPF1 = 0x00000002,
+ DIS_DPF2 = 0x00000004,
+ DIS_DPF3 = 0x00000008,
+ DIS_DPF4 = 0x00000010,
+ DIS_DPF5 = 0x00000020,
+ DIS_DPF6 = 0x00000040,
+ DIS_DPF7 = 0x00000080,
+ DIS_DPF8 = 0x00000100,
+ DIS_DPF9 = 0x00000200,
+ DIS_DPF10 = 0x00000400,
+ DIS_DPF11 = 0x00000800,
+ DIS_DPF12 = 0x00001000,
+ DIS_DPF13 = 0x00002000,
+ DIS_DPF14 = 0x00004000,
+ DIS_DPF15 = 0x00008000,
+};
+
+/* EIC */
+enum EIC_BIT {
+ EIC_MREE = 0x00000001,
+ EIC_MTEE = 0x00000002,
+ EIC_QEE = 0x00000004,
+ EIC_SEE = 0x00000008,
+ EIC_CLLE0 = 0x00000010,
+ EIC_CLLE1 = 0x00000020,
+ EIC_CULE0 = 0x00000040,
+ EIC_CULE1 = 0x00000080,
+ EIC_TFFE = 0x00000100,
+};
+
+/* EIS */
+enum EIS_BIT {
+ EIS_MREF = 0x00000001,
+ EIS_MTEF = 0x00000002,
+ EIS_QEF = 0x00000004,
+ EIS_SEF = 0x00000008,
+ EIS_CLLF0 = 0x00000010,
+ EIS_CLLF1 = 0x00000020,
+ EIS_CULF0 = 0x00000040,
+ EIS_CULF1 = 0x00000080,
+ EIS_TFFF = 0x00000100,
+ EIS_QFS = 0x00010000,
+};
+
+/* RIC0 */
+enum RIC0_BIT {
+ RIC0_FRE0 = 0x00000001,
+ RIC0_FRE1 = 0x00000002,
+ RIC0_FRE2 = 0x00000004,
+ RIC0_FRE3 = 0x00000008,
+ RIC0_FRE4 = 0x00000010,
+ RIC0_FRE5 = 0x00000020,
+ RIC0_FRE6 = 0x00000040,
+ RIC0_FRE7 = 0x00000080,
+ RIC0_FRE8 = 0x00000100,
+ RIC0_FRE9 = 0x00000200,
+ RIC0_FRE10 = 0x00000400,
+ RIC0_FRE11 = 0x00000800,
+ RIC0_FRE12 = 0x00001000,
+ RIC0_FRE13 = 0x00002000,
+ RIC0_FRE14 = 0x00004000,
+ RIC0_FRE15 = 0x00008000,
+ RIC0_FRE16 = 0x00010000,
+ RIC0_FRE17 = 0x00020000,
+};
+
+/* RIS0 */
+enum RIS0_BIT {
+ RIS0_FRF0 = 0x00000001,
+ RIS0_FRF1 = 0x00000002,
+ RIS0_FRF2 = 0x00000004,
+ RIS0_FRF3 = 0x00000008,
+ RIS0_FRF4 = 0x00000010,
+ RIS0_FRF5 = 0x00000020,
+ RIS0_FRF6 = 0x00000040,
+ RIS0_FRF7 = 0x00000080,
+ RIS0_FRF8 = 0x00000100,
+ RIS0_FRF9 = 0x00000200,
+ RIS0_FRF10 = 0x00000400,
+ RIS0_FRF11 = 0x00000800,
+ RIS0_FRF12 = 0x00001000,
+ RIS0_FRF13 = 0x00002000,
+ RIS0_FRF14 = 0x00004000,
+ RIS0_FRF15 = 0x00008000,
+ RIS0_FRF16 = 0x00010000,
+ RIS0_FRF17 = 0x00020000,
+};
+
+/* RIC1 */
+enum RIC1_BIT {
+ RIC1_RFWE = 0x80000000,
+};
+
+/* RIS1 */
+enum RIS1_BIT {
+ RIS1_RFWF = 0x80000000,
+};
+
+/* RIC2 */
+enum RIC2_BIT {
+ RIC2_QFE0 = 0x00000001,
+ RIC2_QFE1 = 0x00000002,
+ RIC2_QFE2 = 0x00000004,
+ RIC2_QFE3 = 0x00000008,
+ RIC2_QFE4 = 0x00000010,
+ RIC2_QFE5 = 0x00000020,
+ RIC2_QFE6 = 0x00000040,
+ RIC2_QFE7 = 0x00000080,
+ RIC2_QFE8 = 0x00000100,
+ RIC2_QFE9 = 0x00000200,
+ RIC2_QFE10 = 0x00000400,
+ RIC2_QFE11 = 0x00000800,
+ RIC2_QFE12 = 0x00001000,
+ RIC2_QFE13 = 0x00002000,
+ RIC2_QFE14 = 0x00004000,
+ RIC2_QFE15 = 0x00008000,
+ RIC2_QFE16 = 0x00010000,
+ RIC2_QFE17 = 0x00020000,
+ RIC2_RFFE = 0x80000000,
+};
+
+/* RIS2 */
+enum RIS2_BIT {
+ RIS2_QFF0 = 0x00000001,
+ RIS2_QFF1 = 0x00000002,
+ RIS2_QFF2 = 0x00000004,
+ RIS2_QFF3 = 0x00000008,
+ RIS2_QFF4 = 0x00000010,
+ RIS2_QFF5 = 0x00000020,
+ RIS2_QFF6 = 0x00000040,
+ RIS2_QFF7 = 0x00000080,
+ RIS2_QFF8 = 0x00000100,
+ RIS2_QFF9 = 0x00000200,
+ RIS2_QFF10 = 0x00000400,
+ RIS2_QFF11 = 0x00000800,
+ RIS2_QFF12 = 0x00001000,
+ RIS2_QFF13 = 0x00002000,
+ RIS2_QFF14 = 0x00004000,
+ RIS2_QFF15 = 0x00008000,
+ RIS2_QFF16 = 0x00010000,
+ RIS2_QFF17 = 0x00020000,
+ RIS2_RFFF = 0x80000000,
+};
+
+/* TIC */
+enum TIC_BIT {
+ TIC_FTE0 = 0x00000001, /* Undocumented? */
+ TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_TFUE = 0x00000100,
+ TIC_TFWE = 0x00000200,
+};
+
+/* TIS */
+enum TIS_BIT {
+ TIS_FTF0 = 0x00000001, /* Undocumented? */
+ TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_TFUF = 0x00000100,
+ TIS_TFWF = 0x00000200,
+};
+
+/* ISS */
+enum ISS_BIT {
+ ISS_FRS = 0x00000001, /* Undocumented? */
+ ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_ES = 0x00000040,
+ ISS_MS = 0x00000080,
+ ISS_TFUS = 0x00000100,
+ ISS_TFWS = 0x00000200,
+ ISS_RFWS = 0x00001000,
+ ISS_CGIS = 0x00002000,
+ ISS_DPS1 = 0x00020000,
+ ISS_DPS2 = 0x00040000,
+ ISS_DPS3 = 0x00080000,
+ ISS_DPS4 = 0x00100000,
+ ISS_DPS5 = 0x00200000,
+ ISS_DPS6 = 0x00400000,
+ ISS_DPS7 = 0x00800000,
+ ISS_DPS8 = 0x01000000,
+ ISS_DPS9 = 0x02000000,
+ ISS_DPS10 = 0x04000000,
+ ISS_DPS11 = 0x08000000,
+ ISS_DPS12 = 0x10000000,
+ ISS_DPS13 = 0x20000000,
+ ISS_DPS14 = 0x40000000,
+ ISS_DPS15 = 0x80000000,
+};
+
+/* GCCR */
+enum GCCR_BIT {
+ GCCR_TCR = 0x00000003,
+ GCCR_TCR_NOREQ = 0x00000000, /* No request */
+ GCCR_TCR_RESET = 0x00000001, /* gPTP/AVTP presentation timer reset */
+ GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */
+ GCCR_LTO = 0x00000004,
+ GCCR_LTI = 0x00000008,
+ GCCR_LPTC = 0x00000010,
+ GCCR_LMTT = 0x00000020,
+ GCCR_TCSS = 0x00000300,
+ GCCR_TCSS_GPTP = 0x00000000, /* gPTP timer value */
+ GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */
+ GCCR_TCSS_AVTP = 0x00000200, /* AVTP presentation time value */
+};
+
+/* GTI */
+enum GTI_BIT {
+ GTI_TIV = 0x0FFFFFFF,
+};
+
+/* GIC */
+enum GIC_BIT {
+ GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTME = 0x00000004,
+};
+
+/* GIS */
+enum GIS_BIT {
+ GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTMF = 0x00000004,
+};
+
+/* ECMR */
+enum ECMR_BIT {
+ ECMR_PRM = 0x00000001,
+ ECMR_DM = 0x00000002,
+ ECMR_TE = 0x00000020,
+ ECMR_RE = 0x00000040,
+ ECMR_MPDE = 0x00000200,
+ ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_RXF = 0x00020000,
+ ECMR_PFR = 0x00040000,
+ ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_RZPF = 0x00100000,
+ ECMR_DPAD = 0x00200000,
+ ECMR_RCSC = 0x00800000,
+ ECMR_TRCCM = 0x04000000,
+};
+
+/* ECSR */
+enum ECSR_BIT {
+ ECSR_ICD = 0x00000001,
+ ECSR_MPD = 0x00000002,
+ ECSR_LCHNG = 0x00000004,
+ ECSR_PHYI = 0x00000008,
+};
+
+/* ECSIPR */
+enum ECSIPR_BIT {
+ ECSIPR_ICDIP = 0x00000001,
+ ECSIPR_MPDIP = 0x00000002,
+ ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDC = 0x00000001,
+ PIR_MMD = 0x00000002,
+ PIR_MDO = 0x00000004,
+ PIR_MDI = 0x00000008,
+};
+
+/* PSR */
+enum PSR_BIT {
+ PSR_LMON = 0x00000001,
+};
+
+/* PIPR */
+enum PIPR_BIT {
+ PIPR_PHYIP = 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+ MPR_MP = 0x0000ffff,
+};
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+};
+
+/* The Ethernet AVB descriptor definitions. */
+struct ravb_desc {
+ __le16 ds; /* Descriptor size */
+ u8 cc; /* Content control MSBs (reserved) */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+enum DIE_DT {
+ /* Frame data */
+ DT_FMID = 0x40,
+ DT_FSTART = 0x50,
+ DT_FEND = 0x60,
+ DT_FSINGLE = 0x70,
+ /* Chain control */
+ DT_LINK = 0x80,
+ DT_LINKFIX = 0x90,
+ DT_EOS = 0xa0,
+ /* HW/SW arbitration */
+ DT_FEMPTY = 0xc0,
+ DT_FEMPTY_IS = 0xd0,
+ DT_FEMPTY_IC = 0xe0,
+ DT_FEMPTY_ND = 0xf0,
+ DT_LEMPTY = 0x20,
+ DT_EEMPTY = 0x30,
+};
+
+struct ravb_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control LSBs */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+struct ravb_ex_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control lower bits */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+ __le32 ts_n; /* Timestamp nsec */
+ __le32 ts_sl; /* Timestamp sec (lower 32 bits) */
+ __le16 ts_sh; /* Timestamp sec (upper 16 bits) */
+ __le16 res; /* Reserved bits */
+};
+
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+/* E-MAC status code */
+enum MSC_BIT {
+ MSC_CRC = 0x01, /* Frame CRC error */
+ MSC_RFE = 0x02, /* Frame reception error (flagged by PHY) */
+ MSC_RTSF = 0x04, /* Frame length error (frame too short) */
+ MSC_RTLF = 0x08, /* Frame length error (frame too long) */
+ MSC_FRE = 0x10, /* Fraction error (not a multiple of 8 bits) */
+ MSC_CRL = 0x20, /* Carrier lost */
+ MSC_CEEF = 0x40, /* Carrier extension error */
+ MSC_MC = 0x80, /* Multicast frame reception */
+};
+
+struct ravb_tx_desc {
+ __le16 ds_tagl; /* Descriptor size and frame tag LSBs */
+ u8 tagh_tsr; /* Frame tag MSBs and timestamp storage request bit */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+enum TX_DS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum TX_TAGH_TSR_BIT {
+ TX_TAGH = 0x3f, /* Frame tag MSBs */
+ TX_TSR = 0x40, /* Timestamp storage request */
+};
+
+enum RAVB_QUEUE {
+ RAVB_BE = 0, /* Best Effort Queue */
+ RAVB_NC, /* Network Control Queue */
+};
+
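+/* The AVB-DMAC provides 22 descriptor chains (CDAR0-CDAR21). In the base
+ * address table, TX queue q uses entry q and RX queue q uses entry
+ * RX_QUEUE_OFFSET + q (see ravb_ring_format()).
+ */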
+#define DBAT_ENTRY_NUM 22
+#define RX_QUEUE_OFFSET 4
+#define NUM_RX_QUEUE 2
+#define NUM_TX_QUEUE 2
+
+struct ravb_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 tag;
+};
+
+struct ravb_ptp_perout {
+ u32 target;
+ u32 period;
+};
+
+#define N_EXT_TS 1
+#define N_PER_OUT 1
+
+struct ravb_ptp {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ u32 default_addend;
+ u32 current_addend;
+ int extts[N_EXT_TS];
+ struct ravb_ptp_perout perout[N_PER_OUT];
+};
+
+struct ravb_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *addr;
+ struct mdiobb_ctrl mdiobb;
+ u32 num_rx_ring[NUM_RX_QUEUE];
+ u32 num_tx_ring[NUM_TX_QUEUE];
+ u32 desc_bat_size;
+ dma_addr_t desc_bat_dma;
+ struct ravb_desc *desc_bat;
+ dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
+ dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct sk_buff **tx_skb[NUM_TX_QUEUE];
+ void **tx_buffers[NUM_TX_QUEUE];
+ u32 rx_over_errors;
+ u32 rx_fifo_errors;
+ struct net_device_stats stats[NUM_RX_QUEUE];
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
+ struct list_head ts_skb_list;
+ u32 ts_skb_tag;
+ struct ravb_ptp ptp;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx[NUM_RX_QUEUE]; /* Consumer ring indices */
+ u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
+ u32 cur_tx[NUM_TX_QUEUE];
+ u32 dirty_tx[NUM_TX_QUEUE];
+ struct napi_struct napi[NUM_RX_QUEUE];
+ struct work_struct work;
+ /* MII transceiver section. */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ struct phy_device *phydev; /* PHY device control */
+ int link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+
+ unsigned no_avb_link:1;
+ unsigned avb_link_active_low:1;
+};
+
+static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return ioread32(priv->addr + reg);
+}
+
+static inline void ravb_write(struct net_device *ndev, u32 data,
+ enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ iowrite32(data, priv->addr + reg);
+}
+
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
+
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void ravb_ptp_stop(struct net_device *ndev);
+
+#endif /* #ifndef __RAVB_H__ */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
new file mode 100644
index 000000000000..fd9745714d90
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -0,0 +1,1824 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/cache.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ravb.h"
+
+#define RAVB_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
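+/* Busy-wait until the masked register field reads back the expected value;
+ * gives up after 10000 polls at 10 us intervals (~100 ms).
+ */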
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+{
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ if ((ravb_read(ndev, reg) & mask) == value)
+ return 0;
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+}
+
+static int ravb_config(struct net_device *ndev)
+{
+ int error;
+
+ /* Set config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+ /* Check if the operating mode is changed to the config mode */
+ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+ if (error)
+ netdev_err(ndev, "failed to switch device to config mode\n");
+
+ return error;
+}
+
+static void ravb_set_duplex(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr = ravb_read(ndev, ECMR);
+
+ if (priv->duplex) /* Full */
+ ecmr |= ECMR_DM;
+ else /* Half */
+ ecmr &= ~ECMR_DM;
+ ravb_write(ndev, ecmr, ECMR);
+}
+
+static void ravb_set_rate(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 100: /* 100BASE */
+ ravb_write(ndev, GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GECMR_SPEED_1000, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
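+/* Shift skb->data up to the next RAVB_ALIGN (128-byte) boundary */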
+static void ravb_set_buffer_align(struct sk_buff *skb)
+{
+ u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+
+ if (reserve)
+ skb_reserve(skb, RAVB_ALIGN - reserve);
+}
+
+/* Get MAC address from the MAC address registers
+ *
+ * Ethernet AVB device doesn't have ROM for MAC address.
+ * This function gets the MAC address that was used by a bootloader.
+ */
+static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+{
+ if (mac) {
+ ether_addr_copy(ndev->dev_addr, mac);
+ } else {
+ ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
+ ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
+ }
+}
+
+static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+ u32 pir = ravb_read(priv->ndev, PIR);
+
+ if (set)
+ pir |= mask;
+ else
+ pir &= ~mask;
+ ravb_write(priv->ndev, pir, PIR);
+}
+
+/* MDC pin control */
+static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDC, level);
+}
+
+/* Data I/O pin control */
+static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MMD, output);
+}
+
+/* Set data bit */
+static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDO, value);
+}
+
+/* Get data bit */
+static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+
+ return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
+}
+
+/* MDIO bus control struct */
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ravb_set_mdc,
+ .set_mdio_dir = ravb_set_mdio_dir,
+ .set_mdio_data = ravb_set_mdio_data,
+ .get_mdio_data = ravb_get_mdio_data,
+};
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ring_size;
+ int i;
+
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free TX skb ringbuffer */
+ if (priv->tx_skb[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ dev_kfree_skb(priv->tx_skb[q][i]);
+ }
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ if (priv->tx_buffers[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ kfree(priv->tx_buffers[q][i]);
+ }
+ kfree(priv->tx_buffers[q]);
+ priv->tx_buffers[q] = NULL;
+
+ if (priv->rx_ring[q]) {
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+ }
+
+ if (priv->tx_ring[q]) {
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] + 1);
+ dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
+}
+
+/* Format skb and descriptor buffer for Ethernet AVB */
+static void ravb_ring_format(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_ex_rx_desc *rx_desc = NULL;
+ struct ravb_tx_desc *tx_desc = NULL;
+ struct ravb_desc *desc = NULL;
+ int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *buffer;
+ int i;
+
+ priv->cur_rx[q] = 0;
+ priv->cur_tx[q] = 0;
+ priv->dirty_rx[q] = 0;
+ priv->dirty_tx[q] = 0;
+
+ memset(priv->rx_ring[q], 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ priv->rx_skb[q][i] = NULL;
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ break;
+ ravb_set_buffer_align(skb);
+ /* RX descriptor */
+ rx_desc = &priv->rx_ring[q][i];
+ /* The size of the buffer should be on 16-byte boundary. */
+ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+ priv->rx_skb[q][i] = skb;
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
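+ /* Terminate the ring: the entry after the last filled descriptor links
+ * back to the start of the ring.
+ */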
+ rx_desc = &priv->rx_ring[q][i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+ priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
+
+ memset(priv->tx_ring[q], 0, tx_ring_size);
+ /* Build TX ring buffer */
+ for (i = 0; i < priv->num_tx_ring[q]; i++) {
+ priv->tx_skb[q][i] = NULL;
+ priv->tx_buffers[q][i] = NULL;
+ buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+ if (!buffer)
+ break;
+ /* Aligned TX buffer */
+ priv->tx_buffers[q][i] = buffer;
+ tx_desc = &priv->tx_ring[q][i];
+ tx_desc->die_dt = DT_EEMPTY;
+ }
+ tx_desc = &priv->tx_ring[q][i];
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+ tx_desc->die_dt = DT_LINKFIX; /* type */
+
+ /* RX descriptor base address for best effort */
+ desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+
+ /* TX descriptor base address for best effort */
+ desc = &priv->desc_bat[q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+}
+
+/* Init skb and descriptor buffer for Ethernet AVB */
+static int ravb_ring_init(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ring_size;
+
+ /* Allocate RX and TX skb rings */
+ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_skb[q]), GFP_KERNEL);
+ if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ goto error;
+
+ /* Allocate rings for the aligned buffers */
+ priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
+ if (!priv->tx_buffers[q])
+ goto error;
+
+ /* Allocate all RX descriptors. */
+ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+ priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->rx_ring[q])
+ goto error;
+
+ priv->dirty_rx[q] = 0;
+
+ /* Allocate all TX descriptors. */
+ ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+ priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ &priv->tx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->tx_ring[q])
+ goto error;
+
+ return 0;
+
+error:
+ ravb_ring_free(ndev, q);
+
+ return -ENOMEM;
+}
+
+/* E-MAC init function */
+static void ravb_emac_init(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr;
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+
+ /* PAUSE prohibition */
+ ecmr = ravb_read(ndev, ECMR);
+ ecmr &= ECMR_DM;
+ ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+ ravb_write(ndev, ecmr, ECMR);
+
+ ravb_set_rate(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ ravb_write(ndev, 1, MPR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
+}
+
+/* Device init function for Ethernet AVB */
+static int ravb_dmac_init(struct net_device *ndev)
+{
+ int error;
+
+ /* Set CONFIG mode */
+ error = ravb_config(ndev);
+ if (error)
+ return error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
+
+#if defined(__LITTLE_ENDIAN)
+ ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+#else
+ ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+#endif
+
+ /* Set AVB RX */
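+ /* 0x18000000 lies in the RCR_RFCL field (reception FIFO critical level) */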
+ ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+
+ /* Set FIFO size */
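+ /* 0x00222200 sets the TGC_TBDx fields to 2 for all four queues */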
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+
+ /* Timestamp enable */
+ ravb_write(ndev, TCCR_TFEN, TCCR);
+
+ /* Interrupt enable: */
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
+ /* Receive FIFO full warning */
+ ravb_write(ndev, RIC1_RFWE, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
+ /* Frame transmitted, timestamp FIFO updated */
+ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ /* Setting the control will start the AVB-DMAC process. */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
+ CCC);
+
+ return 0;
+}
+
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry = 0;
+ u32 size;
+
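+ /* Reclaim entries whose type the DMAC has set back to DT_FEMPTY
+ * after transmission.
+ */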
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+ desc = &priv->tx_ring[q][entry];
+ if (desc->die_dt != DT_FEMPTY)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry]) {
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ free_num++;
+ }
+ stats->tx_packets++;
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
+static void ravb_get_tx_tstamp(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb;
+ struct timespec64 ts;
+ u16 tag, tfa_tag;
+ int count;
+ u32 tfa2;
+
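+ /* TSR_TFFL (bits 10:8) holds the number of entries queued in the TX
+ * timestamp FIFO.
+ */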
+ count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
+ while (count--) {
+ tfa2 = ravb_read(ndev, TFA2);
+ tfa_tag = (tfa2 & TFA2_TST) >> 16;
+ ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
+ ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
+ ravb_read(ndev, TFA1);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
+ list) {
+ skb = ts_skb->skb;
+ tag = ts_skb->tag;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ break;
+ }
+ }
+ ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+ }
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
+ priv->cur_rx[q];
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_ex_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct timespec64 ts;
+ u16 pkt_len = 0;
+ u8 desc_status;
+ int limit;
+
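+ /* Never process more entries than are outstanding in the ring or
+ * allowed by the NAPI quota.
+ */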
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->rx_ring[q][entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
+ MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
+
+ skb = priv->rx_skb[q][entry];
+ priv->rx_skb[q][entry] = NULL;
+ dma_sync_single_for_cpu(&ndev->dev,
+ le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
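+ /* NC frames are timestamped for the V2_L2_EVENT and ALL
+ * filters; BE frames only for the ALL filter.
+ */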
+ get_ts &= (q == RAVB_NC) ?
+ RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
+ ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
+ 32) | le32_to_cpu(desc->ts_sl);
+ ts.tv_nsec = le32_to_cpu(desc->ts_n);
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+ /* The size of the buffer should be on 16-byte boundary. */
+ desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev,
+ PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ break; /* Better luck next round. */
+ ravb_set_buffer_align(skb);
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ le16_to_cpu(desc->ds_cc),
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+static void ravb_rcv_snd_disable(struct net_device *ndev)
+{
+ /* Disable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void ravb_rcv_snd_enable(struct net_device *ndev)
+{
+ /* Enable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+}
+
+/* Wait until the hardware DMA processes have finished */
+static int ravb_stop_dma(struct net_device *ndev)
+{
+ int error;
+
+ /* Wait for stopping the hardware TX process */
+ error = ravb_wait(ndev, TCCR,
+ TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ if (error)
+ return error;
+
+ error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
+ 0);
+ if (error)
+ return error;
+
+ /* Stop the E-MAC's RX/TX processes. */
+ ravb_rcv_snd_disable(ndev);
+
+ /* Wait for stopping the RX DMA process */
+ error = ravb_wait(ndev, CSR, CSR_RPO, 0);
+ if (error)
+ return error;
+
+ /* Stop AVB-DMAC process */
+ return ravb_config(ndev);
+}
+
+/* E-MAC interrupt handler */
+static void ravb_emac_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecsr, psr;
+
+ ecsr = ravb_read(ndev, ECSR);
+ ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
+ if (ecsr & ECSR_ICD)
+ ndev->stats.tx_carrier_errors++;
+ if (ecsr & ECSR_LCHNG) {
+ /* Link changed */
+ if (priv->no_avb_link)
+ return;
+ psr = ravb_read(ndev, PSR);
+ if (priv->avb_link_active_low)
+ psr ^= PSR_LMON;
+ if (!(psr & PSR_LMON)) {
+ /* Disable RX and TX */
+ ravb_rcv_snd_disable(ndev);
+ } else {
+ /* Enable RX and TX */
+ ravb_rcv_snd_enable(ndev);
+ }
+ }
+}
+
+/* Error interrupt handler */
+static void ravb_error_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 eis, ris2;
+
+ eis = ravb_read(ndev, EIS);
+ ravb_write(ndev, ~EIS_QFS, EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+
+ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+ /* Receive FIFO Overflow int */
+ if (ris2 & RIS2_RFFF)
+ priv->rx_fifo_errors++;
+ }
+}
+
+static irqreturn_t ravb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Received and transmitted interrupts */
+ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+ int q;
+
+ /* Timestamp updated */
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Network control and best effort queue RX/TX */
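+ /* The network control queue (RAVB_NC) is serviced before best effort */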
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (((ris0 & ric0) & BIT(q)) ||
+ ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+ ravb_write(ndev, tic & ~BIT(q), TIC);
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ result = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* E-MAC status summary */
+ if (iss & ISS_MS) {
+ ravb_emac_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ if (iss & ISS_CGIS)
+ result = ravb_ptp_interrupt(ndev);
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static int ravb_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int q = napi - priv->napi;
+ int mask = BIT(q);
+ int quota = budget;
+ u32 ris0, tis;
+
+ for (;;) {
+ tis = ravb_read(ndev, TIS);
+ ris0 = ravb_read(ndev, RIS0);
+ if (!((ris0 & mask) || (tis & mask)))
+ break;
+
+ /* Processing RX Descriptor Ring */
+ if (ris0 & mask) {
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~mask, RIS0);
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
+ /* Processing TX Descriptor Ring */
+ if (tis & mask) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~mask, TIS);
+ ravb_tx_free(ndev, q);
+ netif_wake_subqueue(ndev, q);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ napi_complete(napi);
+
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
+ ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
+ }
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+ netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
+ }
+out:
+ return budget - quota;
+}
+
+/* PHY state control function */
+static void ravb_adjust_link(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex(ndev);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ ravb_set_rate(ndev);
+ }
+ if (!priv->link) {
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
+ new_state = true;
+ priv->link = phydev->link;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_enable(ndev);
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_disable(ndev);
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int ravb_phy_init(struct net_device *ndev)
+{
+ struct device_node *np = ndev->dev.parent->of_node;
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+ struct device_node *pn;
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+
+ /* Try connecting to PHY */
+ pn = of_parse_phandle(np, "phy-handle", 0);
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "failed to connect PHY\n");
+ return -ENOENT;
+ }
+
+ netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/* PHY control start function */
+static int ravb_phy_start(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ error = ravb_phy_init(ndev);
+ if (error)
+ return error;
+
+ phy_start(priv->phydev);
+
+ return 0;
+}
+
+static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_ethtool_gset(priv->phydev, ecmd);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int error;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX */
+ ravb_rcv_snd_disable(ndev);
+
+ error = phy_ethtool_sset(priv->phydev, ecmd);
+ if (error)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ priv->duplex = 1;
+ else
+ priv->duplex = 0;
+
+ ravb_set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* Enable TX and RX */
+ ravb_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_nway_reset(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_start_aneg(priv->phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static u32 ravb_get_msglevel(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ravb_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_missed_errors",
+ "rx_queue_0_over_errors",
+
+ "rx_queue_1_current",
+ "tx_queue_1_current",
+ "rx_queue_1_dirty",
+ "tx_queue_1_dirty",
+ "rx_queue_1_packets",
+ "tx_queue_1_packets",
+ "rx_queue_1_bytes",
+ "tx_queue_1_bytes",
+ "rx_queue_1_mcast_packets",
+ "rx_queue_1_errors",
+ "rx_queue_1_crc_errors",
+ "rx_queue_1_frame_errors_",
+ "rx_queue_1_length_errors",
+ "rx_queue_1_missed_errors",
+ "rx_queue_1_over_errors",
+};
+
+#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
+
+static int ravb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return RAVB_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ravb_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int i = 0;
+ int q;
+
+ /* Device-specific stats */
+ for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ struct net_device_stats *stats = &priv->stats[q];
+
+ data[i++] = priv->cur_rx[q];
+ data[i++] = priv->cur_tx[q];
+ data[i++] = priv->dirty_rx[q];
+ data[i++] = priv->dirty_tx[q];
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_packets;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->multicast;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_missed_errors;
+ data[i++] = stats->rx_over_errors;
+ }
+}
+
+static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ break;
+ }
+}
+
+static void ravb_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = BE_RX_RING_MAX;
+ ring->tx_max_pending = BE_TX_RING_MAX;
+ ring->rx_pending = priv->num_rx_ring[RAVB_BE];
+ ring->tx_pending = priv->num_tx_ring[RAVB_BE];
+}
+
+static int ravb_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ if (ring->tx_pending > BE_TX_RING_MAX ||
+ ring->rx_pending > BE_RX_RING_MAX ||
+ ring->tx_pending < BE_TX_RING_MIN ||
+ ring->rx_pending < BE_RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+ /* Wait for DMA stopping */
+ error = ravb_stop_dma(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "cannot set ringparam! Any AVB processes are still running?\n");
+ return error;
+ }
+ synchronize_irq(ndev->irq);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+ }
+
+ /* Set new parameters */
+ priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
+ priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
+
+ if (netif_running(ndev)) {
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return error;
+ }
+
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+
+static int ravb_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
+
+ return 0;
+}
+
+static const struct ethtool_ops ravb_ethtool_ops = {
+ .get_settings = ravb_get_settings,
+ .set_settings = ravb_set_settings,
+ .nway_reset = ravb_nway_reset,
+ .get_msglevel = ravb_get_msglevel,
+ .set_msglevel = ravb_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ravb_get_strings,
+ .get_ethtool_stats = ravb_get_ethtool_stats,
+ .get_sset_count = ravb_get_sset_count,
+ .get_ringparam = ravb_get_ringparam,
+ .set_ringparam = ravb_set_ringparam,
+ .get_ts_info = ravb_get_ts_info,
+};
+
+/* Network device open function for Ethernet AVB */
+static int ravb_open(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ napi_enable(&priv->napi[RAVB_BE]);
+ napi_enable(&priv->napi[RAVB_NC]);
+
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
+ ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_napi_off;
+ }
+
+ /* Device init */
+ error = ravb_dmac_init(ndev);
+ if (error)
+ goto out_free_irq;
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
+ return 0;
+
+out_ptp_stop:
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+out_free_irq:
+ free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+ return error;
+}
+
+/* Timeout function for Ethernet AVB */
+static void ravb_tx_timeout(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ netif_err(priv, tx_err, ndev,
+ "transmit timed out, status %08x, resetting...\n",
+ ravb_read(ndev, ISS));
+
+ /* tx_errors count up */
+ ndev->stats.tx_errors++;
+
+ schedule_work(&priv->work);
+}
+
+static void ravb_tx_timeout_work(struct work_struct *work)
+{
+ struct ravb_private *priv = container_of(work, struct ravb_private,
+ work);
+ struct net_device *ndev = priv->ndev;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Wait for DMA stopping */
+ ravb_stop_dma(ndev);
+
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ /* Device init */
+ ravb_dmac_init(ndev);
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+}
+
+/* Packet transmit function for Ethernet AVB */
+static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb = NULL;
+ u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+ u32 dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 tccr;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+ netif_err(priv, tx_queued, ndev,
+ "still transmitting with the full ring!\n");
+ netif_stop_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ entry = priv->cur_tx[q] % priv->num_tx_ring[q];
+ priv->tx_skb[q][entry] = skb;
+
+ /* skb_put_padto() frees the skb on failure, so don't free it again
+ * in the drop path.
+ */
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ priv->tx_skb[q][entry] = NULL;
+ goto exit;
+ }
+
+ buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
+ memcpy(buffer, skb->data, skb->len);
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(skb->len);
+ dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ goto drop;
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ /* TX timestamp required */
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+ DMA_TO_DEVICE);
+ goto drop;
+ }
+ ts_skb->skb = skb;
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
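+ /* Split the 10-bit tag: upper 6 bits go into TAGH, lower 4 bits
+ * into bits 15:12 of ds_tagl.
+ */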
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
+ }
+
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FSINGLE;
+
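+ /* Kick the DMAC for this queue only if no transmit request is
+ * already pending there; TCCR_TSRQ0 is the queue-0 request bit and
+ * each further queue uses the next bit up, hence the "<< q".
+ */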
+ tccr = ravb_read(ndev, TCCR);
+ if (!(tccr & (TCCR_TSRQ0 << q)))
+ ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+
+ priv->cur_tx[q]++;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
+ !ravb_tx_free(ndev, q))
+ netif_stop_subqueue(ndev, q);
+
+exit:
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[q][entry] = NULL;
+ goto exit;
+}
+
+static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ /* If skb needs TX timestamp, it is handled in network control queue */
+ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
+ RAVB_BE;
+}
+
+static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *nstats, *stats0, *stats1;
+
+ nstats = &ndev->stats;
+ stats0 = &priv->stats[RAVB_BE];
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->tx_dropped += ravb_read(ndev, TROCR);
+ ravb_write(ndev, 0, TROCR); /* (write clear) */
+ nstats->collisions += ravb_read(ndev, CDCR);
+ ravb_write(ndev, 0, CDCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
+ ravb_write(ndev, 0, LCCR); /* (write clear) */
+
+ nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
+ ravb_write(ndev, 0, CERCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
+ ravb_write(ndev, 0, CEECR); /* (write clear) */
+
+ nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
+ nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
+ nstats->multicast = stats0->multicast + stats1->multicast;
+ nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
+ nstats->rx_frame_errors =
+ stats0->rx_frame_errors + stats1->rx_frame_errors;
+ nstats->rx_length_errors =
+ stats0->rx_length_errors + stats1->rx_length_errors;
+ nstats->rx_missed_errors =
+ stats0->rx_missed_errors + stats1->rx_missed_errors;
+ nstats->rx_over_errors =
+ stats0->rx_over_errors + stats1->rx_over_errors;
+
+ return nstats;
+}
+
+/* Update promiscuous bit */
+static void ravb_set_rx_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 ecmr;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ecmr = ravb_read(ndev, ECMR);
+ if (ndev->flags & IFF_PROMISC)
+ ecmr |= ECMR_PRM;
+ else
+ ecmr &= ~ECMR_PRM;
+ ravb_write(ndev, ecmr, ECMR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Device close function for Ethernet AVB */
+static int ravb_close(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Disable interrupts by clearing the interrupt masks. */
+ ravb_write(ndev, 0, RIC0);
+ ravb_write(ndev, 0, RIC1);
+ ravb_write(ndev, 0, RIC2);
+ ravb_write(ndev, 0, TIC);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Set the config mode to stop the AVB-DMAC's processes */
+ if (ravb_stop_dma(ndev) < 0)
+ netdev_err(ndev,
+ "device will be stopped after h/w processes are done.\n");
+
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+
+ /* PHY disconnect */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ free_irq(ndev->irq, ndev);
+
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ return 0;
+}
+
+static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ HWTSTAMP_TX_OFF;
+ if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* Control hardware time stamping */
+static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+ u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
+ u32 tstamp_tx_ctrl;
+
+ if (copy_from_user(&config, req->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+ }
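+ /* Any other filter is widened to "timestamp everything"; the filter
+ * actually applied is reported back to user space through the
+ * copy_to_user() below, as the hwtstamp ABI permits.
+ */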
+
+ priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* ioctl to device function */
+static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return ravb_hwtstamp_get(ndev, req);
+ case SIOCSHWTSTAMP:
+ return ravb_hwtstamp_set(ndev, req);
+ }
+
+ return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static const struct net_device_ops ravb_netdev_ops = {
+ .ndo_open = ravb_open,
+ .ndo_stop = ravb_close,
+ .ndo_start_xmit = ravb_start_xmit,
+ .ndo_select_queue = ravb_select_queue,
+ .ndo_get_stats = ravb_get_stats,
+ .ndo_set_rx_mode = ravb_set_rx_mode,
+ .ndo_tx_timeout = ravb_tx_timeout,
+ .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ /* Bitbang init */
+ priv->mdiobb.ops = &bb_ops;
+
+ /* MII controller setting */
+ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+ if (!priv->mii_bus)
+ return -ENOMEM;
+
+ /* Hook up MII support for ethtool */
+ priv->mii_bus->name = "ravb_mii";
+ priv->mii_bus->parent = dev;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register MDIO bus */
+ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ if (error)
+ goto out_free_bus;
+
+ return 0;
+
+out_free_bus:
+ free_mdio_bitbang(priv->mii_bus);
+ return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+ /* Unregister mdio bus */
+ mdiobus_unregister(priv->mii_bus);
+
+ /* Free bitbang info */
+ free_mdio_bitbang(priv->mii_bus);
+
+ return 0;
+}
+
+static int ravb_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ravb_private *priv;
+ struct net_device *ndev;
+ int error, irq, q;
+ struct resource *res;
+
+ if (!np) {
+ dev_err(&pdev->dev,
+ "this driver is required to be instantiated from device tree\n");
+ return -EINVAL;
+ }
+
+ /* Get base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
+ NUM_TX_QUEUE, NUM_RX_QUEUE);
+ if (!ndev)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+ ndev->dma = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto out_release;
+ }
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->addr)) {
+ error = PTR_ERR(priv->addr);
+ goto out_release;
+ }
+
+ spin_lock_init(&priv->lock);
+ INIT_WORK(&priv->work, ravb_tx_timeout_work);
+
+ priv->phy_interface = of_get_phy_mode(np);
+
+ priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
+ priv->avb_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ /* Set function */
+ ndev->netdev_ops = &ravb_netdev_ops;
+ ndev->ethtool_ops = &ravb_ethtool_ops;
+
+ /* Set AVB config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+
+ /* Set CSEL value */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
+ CCC);
+
+ /* Set GTI value */
+ ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
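+ /* GTI holds the gPTP timer increment in 2^-20 ns fixed point, so the
+ * value written above corresponds to 1000/130 ~= 7.69 ns per cycle,
+ * i.e. an assumed 130 MHz peripheral clock (consistent with the
+ * CCC_CSEL_HPB selection above; not taken from a datasheet).
+ */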
+
+ /* Request GTI loading */
+ ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+
+ /* Allocate descriptor base address table */
+ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+ priv->desc_bat = dma_alloc_coherent(&pdev->dev, priv->desc_bat_size,
+ &priv->desc_bat_dma, GFP_KERNEL);
+ if (!priv->desc_bat) {
+ dev_err(&ndev->dev,
+ "Cannot allocate desc base address table (size %d bytes)\n",
+ priv->desc_bat_size);
+ error = -ENOMEM;
+ goto out_release;
+ }
+ for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+ priv->desc_bat[q].die_dt = DT_EOS;
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
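+ /* Each DBAT entry points at the descriptor ring of one RX/TX queue.
+ * All entries are parked at DT_EOS above; the ones actually used are
+ * presumably filled in when the rings are set up by ravb_dmac_init()
+ * (not shown in this hunk).
+ */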
+
+ /* Initialise HW timestamp list */
+ INIT_LIST_HEAD(&priv->ts_skb_list);
+
+ /* Debug message level */
+ priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+
+ /* Read and set MAC address */
+ ravb_read_mac_address(ndev, of_get_mac_address(np));
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev,
+ "no valid MAC address supplied, using a random one\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ /* MDIO bus init */
+ error = ravb_mdio_init(priv);
+ if (error) {
+ dev_err(&ndev->dev, "failed to initialize MDIO\n");
+ goto out_dma_free;
+ }
+
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+
+ /* Network device register */
+ error = register_netdev(ndev);
+ if (error)
+ goto out_napi_del;
+
+ /* Print device information */
+ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+ platform_set_drvdata(pdev, ndev);
+
+ return 0;
+
+out_napi_del:
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+out_dma_free:
+ dma_free_coherent(&pdev->dev, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+out_release:
+ if (ndev)
+ free_netdev(ndev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return error;
+}
+
+static int ravb_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ dma_free_coherent(&pdev->dev, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+ pm_runtime_put_sync(&pdev->dev);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ravb_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ravb_dev_pm_ops = {
+ .runtime_suspend = ravb_runtime_nop,
+ .runtime_resume = ravb_runtime_nop,
+};
+
+#define RAVB_PM_OPS (&ravb_dev_pm_ops)
+#else
+#define RAVB_PM_OPS NULL
+#endif
+
+static const struct of_device_id ravb_match_table[] = {
+ { .compatible = "renesas,etheravb-r8a7790" },
+ { .compatible = "renesas,etheravb-r8a7794" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
+static struct platform_driver ravb_driver = {
+ .probe = ravb_probe,
+ .remove = ravb_remove,
+ .driver = {
+ .name = "ravb",
+ .pm = RAVB_PM_OPS,
+ .of_match_table = ravb_match_table,
+ },
+};
+
+module_platform_driver(ravb_driver);
+
+MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
+MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
new file mode 100644
index 000000000000..42656da50500
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -0,0 +1,357 @@
+/* PTP 1588 clock using the Renesas Ethernet AVB
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "ravb.h"
+
+static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
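+ /* A timer control request may only be issued while no other request
+ * is in flight, hence the wait both before raising the request bit
+ * and afterwards for its completion.
+ */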
+ error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ if (error)
+ return error;
+
+ ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+ return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
+ if (error)
+ return error;
+
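+ /* The captured seconds value is 48 bits wide: GCT1 carries the low
+ * 32 bits and GCT2 the upper 16, mirroring the GTO1/GTO2 split used
+ * by ravb_ptp_time_write() below.
+ */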
+ ts->tv_nsec = ravb_read(ndev, GCT0);
+ ts->tv_sec = ravb_read(ndev, GCT1) |
+ ((s64)ravb_read(ndev, GCT2) << 32);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_write(struct ravb_private *priv,
+ const struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+ u32 gccr;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
+ if (error)
+ return error;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTO)
+ return -EBUSY;
+ ravb_write(ndev, ts->tv_nsec, GTO0);
+ ravb_write(ndev, ts->tv_sec, GTO1);
+ ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
+ ravb_write(ndev, gccr | GCCR_LTO, GCCR);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
+{
+ struct net_device *ndev = priv->ndev;
+ /* When the comparison value (GPTC.PTCV) is in range of
+ * [x-1 to x+1] (x is the configured increment value in
+ * GTI.TIV), it may happen that a comparison match is
+ * not detected when the timer wraps around.
+ */
+ u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
+ u32 gccr;
+
+ if (ns < gti_ns_plus_1)
+ ns = gti_ns_plus_1;
+ else if (ns > 0 - gti_ns_plus_1)
+ ns = 0 - gti_ns_plus_1;
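+ /* Note that "0 - gti_ns_plus_1" relies on u32 wraparound and equals
+ * 2^32 - gti_ns_plus_1, so the clamp keeps the compare value at least
+ * one increment away from the wrap point on either side.
+ */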
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LPTC)
+ return -EBUSY;
+ ravb_write(ndev, ns, GPTC);
+ ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
+
+ return 0;
+}
+
+/* PTP clock operations */
+static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 diff, addend;
+ bool neg_adj = false;
+ u32 gccr;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+ addend = priv->ptp.default_addend;
+ diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
+
+ addend = neg_adj ? addend - diff : addend + diff;
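+ /* The addend is scaled by (1 +/- ppb / 1e9); for example, ppb = 1000
+ * nudges the per-cycle increment by one part per million.
+ */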
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->ptp.current_addend = addend;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTI) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -EBUSY;
+ }
+ ravb_write(ndev, addend & GTI_TIV, GTI);
+ ravb_write(ndev, gccr | GCCR_LTI, GCCR);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct timespec64 ts;
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, &ts);
+ if (!error) {
+ u64 now = ktime_to_ns(timespec64_to_ktime(ts));
+
+ ts = ns_to_timespec64(now + delta);
+ error = ravb_ptp_time_write(priv, &ts);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_write(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_extts_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (priv->ptp.extts[req->index] == on)
+ return 0;
+ priv->ptp.extts[req->index] = on;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gic = ravb_read(ndev, GIC);
+ if (on)
+ gic |= GIC_PTCE;
+ else
+ gic &= ~GIC_PTCE;
+ ravb_write(ndev, gic, GIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_perout_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ struct ravb_ptp_perout *perout;
+ unsigned long flags;
+ int error = 0;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (on) {
+ u64 start_ns;
+ u64 period_ns;
+
+ start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
+ period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
+
+ if (start_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ if (period_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->target = (u32)start_ns;
+ perout->period = (u32)period_ns;
+ error = ravb_ptp_update_compare(priv, (u32)start_ns);
+ if (!error) {
+ /* Unmask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic |= GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ } else {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->period = 0;
+
+ /* Mask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic &= ~GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ switch (req->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return ravb_ptp_extts(ptp, &req->extts, on);
+ case PTP_CLK_REQ_PEROUT:
+ return ravb_ptp_perout(ptp, &req->perout, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ptp_clock_info ravb_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ravb clock",
+ .max_adj = 50000000,
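+ /* 50,000,000 ppb lets the GTI addend be trimmed by up to +/-5% */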
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = N_PER_OUT,
+ .adjfreq = ravb_ptp_adjfreq,
+ .adjtime = ravb_ptp_adjtime,
+ .gettime64 = ravb_ptp_gettime64,
+ .settime64 = ravb_ptp_settime64,
+ .enable = ravb_ptp_enable,
+};
+
+/* Caller must hold the lock */
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 gis = ravb_read(ndev, GIS);
+
+ gis &= ravb_read(ndev, GIC);
+ if (gis & GIS_PTCF) {
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ravb_read(ndev, GCPT);
+ ptp_clock_event(priv->ptp.clock, &event);
+ }
+ if (gis & GIS_PTMF) {
+ struct ravb_ptp_perout *perout = priv->ptp.perout;
+
+ if (perout->period) {
+ perout->target += perout->period;
+ ravb_ptp_update_compare(priv, perout->target);
+ }
+ }
+
+ if (gis) {
+ ravb_write(ndev, ~gis, GIS);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 gccr;
+
+ priv->ptp.info = ravb_ptp_info;
+
+ priv->ptp.default_addend = ravb_read(ndev, GTI);
+ priv->ptp.current_addend = priv->ptp.default_addend;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
+ ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
+}
+
+void ravb_ptp_stop(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_write(ndev, 0, GIC);
+ ravb_write(ndev, 0, GIS);
+
+ ptp_clock_unregister(priv->ptp.clock);
+}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 819289e5be4f..d4ec660bb3b7 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -326,10 +326,18 @@ static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
return !!rocker_port->bridge_dev;
}
+#define ROCKER_OP_FLAG_REMOVE BIT(0)
+#define ROCKER_OP_FLAG_NOWAIT BIT(1)
+#define ROCKER_OP_FLAG_LEARNED BIT(2)
+#define ROCKER_OP_FLAG_REFRESH BIT(3)
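+/* ROCKER_OP_FLAG_NOWAIT makes rocker_cmd_exec() fire-and-forget: memory is
+ * allocated with GFP_ATOMIC and the completion IRQ frees the wait object
+ * instead of waking a sleeper, so atomic-context callers (FDB learning,
+ * neighbour updates) can still issue hardware commands.
+ */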
+
static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, size_t size)
+ enum switchdev_trans trans, int flags,
+ size_t size)
{
struct list_head *elem = NULL;
+ gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
+ GFP_ATOMIC : GFP_KERNEL;
/* If in transaction prepare phase, allocate the memory
* and enqueue it on a per-port list. If in transaction
@@ -342,7 +350,7 @@ static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
switch (trans) {
case SWITCHDEV_TRANS_PREPARE:
- elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
if (!elem)
return NULL;
list_add_tail(elem, &rocker_port->trans_mem);
@@ -353,7 +361,7 @@ static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
list_del_init(elem);
break;
case SWITCHDEV_TRANS_NONE:
- elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
if (elem)
INIT_LIST_HEAD(elem);
break;
@@ -365,16 +373,17 @@ static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
}
static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, size_t size)
+ enum switchdev_trans trans, int flags,
+ size_t size)
{
- return __rocker_port_mem_alloc(rocker_port, trans, size);
+ return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}
static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, size_t n,
- size_t size)
+ enum switchdev_trans trans, int flags,
+ size_t n, size_t size)
{
- return __rocker_port_mem_alloc(rocker_port, trans, n * size);
+ return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}
static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
@@ -397,11 +406,13 @@ static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
struct rocker_wait {
wait_queue_head_t wait;
bool done;
+ bool nowait;
};
static void rocker_wait_reset(struct rocker_wait *wait)
{
wait->done = false;
+ wait->nowait = false;
}
static void rocker_wait_init(struct rocker_wait *wait)
@@ -411,11 +422,12 @@ static void rocker_wait_init(struct rocker_wait *wait)
}
static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ enum switchdev_trans trans,
+ int flags)
{
struct rocker_wait *wait;
- wait = rocker_port_kzalloc(rocker_port, trans, sizeof(*wait));
+ wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
if (!wait)
return NULL;
rocker_wait_init(wait);
@@ -1386,7 +1398,12 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
spin_lock(&rocker->cmd_ring_lock);
while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
wait = rocker_desc_cookie_ptr_get(desc_info);
- rocker_wait_wake_up(wait);
+ if (wait->nowait) {
+ rocker_desc_gen_clear(desc_info);
+ rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
+ } else {
+ rocker_wait_wake_up(wait);
+ }
credits++;
}
spin_unlock(&rocker->cmd_ring_lock);
@@ -1437,45 +1454,19 @@ static int rocker_event_link_change(const struct rocker *rocker,
return 0;
}
-#define ROCKER_OP_FLAG_REMOVE BIT(0)
-#define ROCKER_OP_FLAG_LEARNED BIT(1)
-#define ROCKER_OP_FLAG_REFRESH BIT(2)
-
static int rocker_port_fdb(struct rocker_port *rocker_port,
enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags);
-struct rocker_mac_vlan_seen_work {
- struct work_struct work;
- struct rocker_port *rocker_port;
- int flags;
- unsigned char addr[ETH_ALEN];
- __be16 vlan_id;
-};
-
-static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
-{
- const struct rocker_mac_vlan_seen_work *sw =
- container_of(work, struct rocker_mac_vlan_seen_work, work);
-
- rtnl_lock();
- rocker_port_fdb(sw->rocker_port, SWITCHDEV_TRANS_NONE,
- sw->addr, sw->vlan_id, sw->flags);
- rtnl_unlock();
-
- kfree(work);
-}
-
static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_mac_vlan_seen_work *sw;
const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
unsigned int port_number;
struct rocker_port *rocker_port;
const unsigned char *addr;
- int flags = ROCKER_OP_FLAG_LEARNED;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
__be16 vlan_id;
rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
@@ -1497,20 +1488,8 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
rocker_port->stp_state != BR_STATE_FORWARDING)
return 0;
- sw = kmalloc(sizeof(*sw), GFP_ATOMIC);
- if (!sw)
- return -ENOMEM;
-
- INIT_WORK(&sw->work, rocker_event_mac_vlan_seen_work);
-
- sw->rocker_port = rocker_port;
- sw->flags = flags;
- ether_addr_copy(sw->addr, addr);
- sw->vlan_id = vlan_id;
-
- schedule_work(&sw->work);
-
- return 0;
+ return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
+ addr, vlan_id, flags);
}
static int rocker_event_process(const struct rocker *rocker,
@@ -1595,32 +1574,34 @@ typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
void *priv);
static int rocker_cmd_exec(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ enum switchdev_trans trans, int flags,
rocker_cmd_prep_cb_t prepare, void *prepare_priv,
rocker_cmd_proc_cb_t process, void *process_priv)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
- unsigned long flags;
+ bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
+ unsigned long lock_flags;
int err;
- wait = rocker_wait_create(rocker_port, trans);
+ wait = rocker_wait_create(rocker_port, trans, flags);
if (!wait)
return -ENOMEM;
+ wait->nowait = nowait;
- spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+ spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
desc_info = rocker_desc_head_get(&rocker->cmd_ring);
if (!desc_info) {
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
err = -EAGAIN;
goto out;
}
err = prepare(rocker_port, desc_info, prepare_priv);
if (err) {
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
goto out;
}
@@ -1629,7 +1610,10 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port,
if (trans != SWITCHDEV_TRANS_PREPARE)
rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
+
+ if (nowait)
+ return 0;
if (trans != SWITCHDEV_TRANS_PREPARE)
if (!rocker_wait_event_timeout(wait, HZ / 10))
@@ -1859,7 +1843,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
ecmd);
@@ -1868,7 +1852,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
macaddr);
@@ -1877,7 +1861,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_set_port_settings_ethtool_prep,
ecmd, NULL, NULL);
}
@@ -1885,7 +1869,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_set_port_settings_macaddr_prep,
macaddr, NULL, NULL);
}
@@ -1893,7 +1877,7 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_port_set_learning(struct rocker_port *rocker_port,
enum switchdev_trans trans)
{
- return rocker_cmd_exec(rocker_port, trans,
+ return rocker_cmd_exec(rocker_port, trans, 0,
rocker_cmd_set_port_learning_prep,
NULL, NULL, NULL);
}
@@ -2409,17 +2393,17 @@ rocker_flow_tbl_find(const struct rocker *rocker,
}
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ enum switchdev_trans trans, int flags,
struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
- unsigned long flags;
+ unsigned long lock_flags;
match->key_crc32 = crc32(~0, &match->key, key_len);
- spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
found = rocker_flow_tbl_find(rocker, match);
@@ -2439,25 +2423,25 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
if (trans != SWITCHDEV_TRANS_PREPARE)
hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
- return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add,
- found, NULL, NULL);
+ return rocker_cmd_exec(rocker_port, trans, flags,
+ rocker_cmd_flow_tbl_add, found, NULL, NULL);
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ enum switchdev_trans trans, int flags,
struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
- unsigned long flags;
+ unsigned long lock_flags;
int err = 0;
match->key_crc32 = crc32(~0, &match->key, key_len);
- spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
found = rocker_flow_tbl_find(rocker, match);
@@ -2467,12 +2451,12 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
rocker_port_kfree(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker_port, trans,
+ err = rocker_cmd_exec(rocker_port, trans, flags,
rocker_cmd_flow_tbl_del,
found, NULL, NULL);
rocker_port_kfree(trans, found);
@@ -2486,9 +2470,9 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
struct rocker_flow_tbl_entry *entry)
{
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_flow_tbl_del(rocker_port, trans, entry);
+ return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
else
- return rocker_flow_tbl_add(rocker_port, trans, entry);
+ return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
@@ -2498,7 +2482,7 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2520,7 +2504,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2547,7 +2531,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2587,7 +2571,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
bool wild = false;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2637,7 +2621,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2667,7 +2651,7 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
u32 priority;
struct rocker_flow_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2735,14 +2719,14 @@ static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ enum switchdev_trans trans, int flags,
struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
- unsigned long flags;
+ unsigned long lock_flags;
- spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
found = rocker_group_tbl_find(rocker, match);
@@ -2760,22 +2744,22 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
if (trans != SWITCHDEV_TRANS_PREPARE)
hash_add(rocker->group_tbl, &found->entry, found->group_id);
- spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
- return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add,
- found, NULL, NULL);
+ return rocker_cmd_exec(rocker_port, trans, flags,
+ rocker_cmd_group_tbl_add, found, NULL, NULL);
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ enum switchdev_trans trans, int flags,
struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
- unsigned long flags;
+ unsigned long lock_flags;
int err = 0;
- spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
found = rocker_group_tbl_find(rocker, match);
@@ -2785,12 +2769,12 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
- spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
rocker_group_tbl_entry_free(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker_port, trans,
+ err = rocker_cmd_exec(rocker_port, trans, flags,
rocker_cmd_group_tbl_del,
found, NULL, NULL);
rocker_group_tbl_entry_free(trans, found);
@@ -2804,9 +2788,9 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port,
struct rocker_group_tbl_entry *entry)
{
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_group_tbl_del(rocker_port, trans, entry);
+ return rocker_group_tbl_del(rocker_port, trans, flags, entry);
else
- return rocker_group_tbl_add(rocker_port, trans, entry);
+ return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
@@ -2816,7 +2800,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port,
{
struct rocker_group_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2833,15 +2817,15 @@ static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
{
struct rocker_group_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = group_id;
entry->group_count = group_count;
- entry->group_ids = rocker_port_kcalloc(rocker_port, trans, group_count,
- sizeof(u32));
+ entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
+ group_count, sizeof(u32));
if (!entry->group_ids) {
rocker_port_kfree(trans, entry);
return -ENOMEM;
@@ -2868,7 +2852,7 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
{
struct rocker_group_tbl_entry *entry;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2901,10 +2885,10 @@ static void _rocker_neigh_add(struct rocker *rocker,
enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
- entry->index = rocker->neigh_tbl_next_index;
+ if (trans != SWITCHDEV_TRANS_COMMIT)
+ entry->index = rocker->neigh_tbl_next_index++;
if (trans == SWITCHDEV_TRANS_PREPARE)
return;
- rocker->neigh_tbl_next_index++;
entry->ref_count++;
hash_add(rocker->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
@@ -2951,7 +2935,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
bool removing;
int err = 0;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -3067,7 +3051,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
bool resolved = true;
int err = 0;
- entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -3124,8 +3108,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
int err = 0;
int i;
- group_ids = rocker_port_kcalloc(rocker_port, trans, rocker->port_count,
- sizeof(u32));
+ group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
+ rocker->port_count, sizeof(u32));
if (!group_ids)
return -ENOMEM;
@@ -3550,7 +3534,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- lw = rocker_port_kzalloc(rocker_port, trans, sizeof(*lw));
+ lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
if (!lw)
return -ENOMEM;
@@ -3594,7 +3578,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
unsigned long lock_flags;
- fdb = rocker_port_kzalloc(rocker_port, trans, sizeof(*fdb));
+ fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
if (!fdb)
return -ENOMEM;
@@ -3632,12 +3616,11 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ enum switchdev_trans trans, int flags)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_fdb_tbl_entry *found;
unsigned long lock_flags;
- int flags = ROCKER_OP_FLAG_REMOVE;
struct hlist_node *tmp;
int bkt;
int err = 0;
@@ -3646,6 +3629,8 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
rocker_port->stp_state == BR_STATE_FORWARDING)
return 0;
+ flags |= ROCKER_OP_FLAG_REMOVE;
+
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
@@ -3702,13 +3687,12 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
}
static int rocker_port_fwding(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ enum switchdev_trans trans, int flags)
{
bool pop_vlan;
u32 out_pport;
__be16 vlan_id;
u16 vid;
- int flags = 0;
int err;
/* Port will be forwarding-enabled if its STP state is LEARNING
@@ -3742,12 +3726,12 @@ static int rocker_port_fwding(struct rocker_port *rocker_port,
}
static int rocker_port_stp_update(struct rocker_port *rocker_port,
- enum switchdev_trans trans, u8 state)
+ enum switchdev_trans trans, int flags,
+ u8 state)
{
bool want[ROCKER_CTRL_MAX] = { 0, };
bool prev_ctrls[ROCKER_CTRL_MAX];
u8 prev_state;
- int flags;
int err;
int i;
@@ -3783,8 +3767,9 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (want[i] != rocker_port->ctrls[i]) {
- flags = (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
- err = rocker_port_ctrl(rocker_port, trans, flags,
+ int ctrl_flags = flags |
+ (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+ err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
&rocker_ctrls[i]);
if (err)
goto err_out;
@@ -3792,11 +3777,11 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
}
}
- err = rocker_port_fdb_flush(rocker_port, trans);
+ err = rocker_port_fdb_flush(rocker_port, trans, flags);
if (err)
goto err_out;
- err = rocker_port_fwding(rocker_port, trans);
+ err = rocker_port_fwding(rocker_port, trans, flags);
err_out:
if (trans == SWITCHDEV_TRANS_PREPARE) {
@@ -3808,25 +3793,27 @@ err_out:
}
static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ enum switchdev_trans trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will enable port */
return 0;
/* port is not bridged, so simulate going to FORWARDING state */
- return rocker_port_stp_update(rocker_port, trans, BR_STATE_FORWARDING);
+ return rocker_port_stp_update(rocker_port, trans, flags,
+ BR_STATE_FORWARDING);
}
static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ enum switchdev_trans trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will disable port */
return 0;
/* port is not bridged, so simulate going to DISABLED state */
- return rocker_port_stp_update(rocker_port, trans, BR_STATE_DISABLED);
+ return rocker_port_stp_update(rocker_port, trans, flags,
+ BR_STATE_DISABLED);
}
static struct rocker_internal_vlan_tbl_entry *
@@ -3990,7 +3977,7 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err)
goto err_fwd_enable;
@@ -4017,7 +4004,8 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE);
+ rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_NOWAIT);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
rocker_port_dma_rings_fini(rocker_port);
@@ -4171,7 +4159,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_phys_name_proc,
&name);
@@ -4266,6 +4254,7 @@ static int rocker_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_PORT_STP_STATE:
err = rocker_port_stp_update(rocker_port, attr->trans,
+ ROCKER_OP_FLAG_NOWAIT,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
@@ -4403,7 +4392,7 @@ static int rocker_port_fdb_del(struct rocker_port *rocker_port,
const struct switchdev_obj_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
if (!rocker_port_is_bridged(rocker_port))
return -EINVAL;
@@ -4618,7 +4607,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
priv);
@@ -4802,6 +4791,8 @@ static void rocker_remove_ports(const struct rocker *rocker)
for (i = 0; i < rocker->port_count; i++) {
rocker_port = rocker->ports[i];
+ if (!rocker_port)
+ continue;
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
@@ -5168,7 +5159,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
return err;
if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
+ err = rocker_port_fwd_enable(rocker_port,
+ SWITCHDEV_TRANS_NONE, 0);
return err;
}
@@ -5227,7 +5219,8 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = {
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+ int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
+ ROCKER_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *)n->primary_key;
return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0c42ed9c9e4c..67bdaf39dce5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2920,6 +2920,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
+ efx->state = STATE_UNINIT;
rtnl_unlock();
if (efx->type->sriov_fini)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 30e28f0d9a60..26c339dd0467 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -98,7 +98,7 @@ static void set_to_rgmii(struct rk_priv_data *bsp_priv,
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
@@ -115,7 +115,7 @@ static void set_to_rmii(struct rk_priv_data *bsp_priv)
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
@@ -128,7 +128,7 @@ static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
@@ -147,7 +147,7 @@ static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
@@ -170,46 +170,46 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv)
bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
if (IS_ERR(bsp_priv->mac_clk_rx))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "mac_clk_rx");
+ dev_err(dev, "cannot get clock %s\n",
+ "mac_clk_rx");
bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
if (IS_ERR(bsp_priv->mac_clk_tx))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "mac_clk_tx");
+ dev_err(dev, "cannot get clock %s\n",
+ "mac_clk_tx");
bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
if (IS_ERR(bsp_priv->aclk_mac))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "aclk_mac");
+ dev_err(dev, "cannot get clock %s\n",
+ "aclk_mac");
bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
if (IS_ERR(bsp_priv->pclk_mac))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "pclk_mac");
+ dev_err(dev, "cannot get clock %s\n",
+ "pclk_mac");
bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
if (IS_ERR(bsp_priv->clk_mac))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "stmmaceth");
+ dev_err(dev, "cannot get clock %s\n",
+ "stmmaceth");
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
if (IS_ERR(bsp_priv->clk_mac_ref))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "clk_mac_ref");
+ dev_err(dev, "cannot get clock %s\n",
+ "clk_mac_ref");
if (!bsp_priv->clock_input) {
bsp_priv->clk_mac_refout =
devm_clk_get(dev, "clk_mac_refout");
if (IS_ERR(bsp_priv->clk_mac_refout))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "clk_mac_refout");
+ dev_err(dev, "cannot get clock %s\n",
+ "clk_mac_refout");
}
}
if (bsp_priv->clock_input) {
- dev_info(dev, "%s: clock input from PHY\n", __func__);
+ dev_info(dev, "clock input from PHY\n");
} else {
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
@@ -296,20 +296,18 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
struct device *dev = &bsp_priv->pdev->dev;
if (!ldo) {
- dev_err(dev, "%s: no regulator found\n", __func__);
+ dev_err(dev, "no regulator found\n");
return -1;
}
if (enable) {
ret = regulator_enable(ldo);
if (ret)
- dev_err(dev, "%s: fail to enable phy-supply\n",
- __func__);
+ dev_err(dev, "fail to enable phy-supply\n");
} else {
ret = regulator_disable(ldo);
if (ret)
- dev_err(dev, "%s: fail to disable phy-supply\n",
- __func__);
+ dev_err(dev, "fail to disable phy-supply\n");
}
return 0;
@@ -341,12 +339,11 @@ static void *rk_gmac_setup(struct platform_device *pdev)
ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
if (ret) {
- dev_err(dev, "%s: Can not read property: clock_in_out.\n",
- __func__);
+ dev_err(dev, "Can not read property: clock_in_out.\n");
bsp_priv->clock_input = true;
} else {
- dev_info(dev, "%s: clock input or output? (%s).\n",
- __func__, strings);
+ dev_info(dev, "clock input or output? (%s).\n",
+ strings);
if (!strcmp(strings, "input"))
bsp_priv->clock_input = true;
else
@@ -356,22 +353,22 @@ static void *rk_gmac_setup(struct platform_device *pdev)
ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
if (ret) {
bsp_priv->tx_delay = 0x30;
- dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
- dev_err(dev, "%s: set tx_delay to 0x%x\n",
- __func__, bsp_priv->tx_delay);
+ dev_err(dev, "Can not read property: tx_delay.");
+ dev_err(dev, "set tx_delay to 0x%x\n",
+ bsp_priv->tx_delay);
} else {
- dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
+ dev_info(dev, "TX delay(0x%x).\n", value);
bsp_priv->tx_delay = value;
}
ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
if (ret) {
bsp_priv->rx_delay = 0x10;
- dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
- dev_err(dev, "%s: set rx_delay to 0x%x\n",
- __func__, bsp_priv->rx_delay);
+ dev_err(dev, "Can not read property: rx_delay.");
+ dev_err(dev, "set rx_delay to 0x%x\n",
+ bsp_priv->rx_delay);
} else {
- dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
+ dev_info(dev, "RX delay(0x%x).\n", value);
bsp_priv->rx_delay = value;
}
@@ -381,13 +378,13 @@ static void *rk_gmac_setup(struct platform_device *pdev)
/*rmii or rgmii*/
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
- dev_info(dev, "%s: init for RGMII\n", __func__);
+ dev_info(dev, "init for RGMII\n");
set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
} else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
- dev_info(dev, "%s: init for RMII\n", __func__);
+ dev_info(dev, "init for RMII\n");
set_to_rmii(bsp_priv);
} else {
- dev_err(dev, "%s: NO interface defined!\n", __func__);
+ dev_err(dev, "NO interface defined!\n");
}
gmac_clk_init(bsp_priv);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ac62a5e248b0..79f0ec4e51ac 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -102,6 +102,18 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
}
}
+/**
+ * gelic_descr_get_status -- returns the status of a descriptor
+ * @descr: descriptor to look at
+ *
+ * returns the status as in the dmac_cmd_status field of the descriptor
+ */
+static enum gelic_descr_dma_status
+gelic_descr_get_status(struct gelic_descr *descr)
+{
+ return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+}
+
static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
{
int status;
@@ -278,18 +290,6 @@ void gelic_card_down(struct gelic_card *card)
}
/**
- * gelic_descr_get_status -- returns the status of a descriptor
- * @descr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static enum gelic_descr_dma_status
-gelic_descr_get_status(struct gelic_descr *descr)
-{
- return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
-}
-
-/**
* gelic_card_free_chain - free descriptor chain
* @card: card structure
* @descr_in: address of desc
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index f66ddaee0c87..ad7f51218ab5 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on (PCI || USE_OF)
+ depends on (PCI || OF_IRQ)
select CRC32
select MII
---help---
@@ -43,7 +43,7 @@ config VIA_RHINE_MMIO
config VIA_VELOCITY
tristate "VIA Velocity support"
- depends on (PCI || USE_OF)
+ depends on (PCI || (OF_ADDRESS && OF_IRQ))
select CRC32
select CRC_CCITT
select MII
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 725106f75d42..a83263743665 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -96,7 +96,6 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3b99a4df71f8..5a1068df7038 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -62,12 +62,12 @@
u32 temac_ior(struct temac_local *lp, int offset)
{
- return in_be32((u32 *)(lp->regs + offset));
+ return in_be32(lp->regs + offset);
}
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
- out_be32((u32 *) (lp->regs + offset), value);
+ out_be32(lp->regs + offset, value);
}
int temac_indirect_busywait(struct temac_local *lp)
@@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
*/
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
+ return in_be32(lp->sdma_regs + (reg << 2));
}
/**
@@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg)
*/
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
- out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+ out_be32(lp->sdma_regs + (reg << 2), value);
}
/* DMA register access functions can be DCR based or memory mapped.
@@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_unlock(&lp->indirect_mutex);
}
-struct temac_option {
+static struct temac_option {
int flg;
u32 opt;
u32 reg;
@@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev)
ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void temac_adjust_link(struct net_device *ndev)
+static void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct phy_device *phy = lp->phy_dev;
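Editor's note on the ll_temac changes: they do two things, drop (u32 *) casts that only obscured the register base's real __iomem type, and mark file-local symbols static. A minimal sketch of the accessor pattern, assuming a powerpc/microblaze build where in_be32()/out_be32() exist; struct and names are invented for illustration:

#include <linux/io.h>
#include <linux/types.h>

/* Keeping the base as void __iomem * lets sparse verify the address
 * space, and pointer arithmetic on it needs no cast.
 */
struct example_hw {
	void __iomem *regs;
};

static u32 example_ior(struct example_hw *hw, int offset)
{
	return in_be32(hw->regs + offset);
}

static void example_iow(struct example_hw *hw, int offset, u32 value)
{
	out_be32(hw->regs + offset, value);
}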
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index cc27dea3414e..9956680402de 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
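Editor's note on the srf.c fix: on 64-bit targets a pointer no longer fits in the int that %x consumes, so the old format string truncated the address (and mismatched varargs are undefined behavior). %p is the correct conversion for any pointer. A minimal sketch, helper name invented:

#include <linux/printk.h>

/* Hypothetical helper: %p prints the full pointer portably; %x would
 * consume only an int's worth of it and warn under -Wformat.
 */
static void log_frame(void *smt, int len)
{
	pr_debug("SRF: sending SRF at %p, len %d\n", smt, len);
}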
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 63ff08a26da8..7856b6ccf5c5 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -76,7 +76,6 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
-#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b5dc59de094e..4dea85bfc545 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -136,8 +136,8 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
- /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
- phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+ /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
@@ -167,6 +167,9 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+ /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+
/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 0d16c7d9e1bf..2a328703b4ae 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -158,6 +158,18 @@ static struct phy_driver dm91xx_driver[] = {
.config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = 0x0181b8b0,
+ .name = "Davicom DM9161B/C",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = dm9161_config_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
@@ -186,6 +198,7 @@ module_phy_driver(dm91xx_driver);
static struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
+ { 0x0181b8b0, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
{ 0x00181b80, 0x0ffffff0 },
{ }
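Editor's note on the Davicom hunk: it touches two tables, the phy_driver array that supplies the driver ops, and davicom_tbl, whose (id, mask) pairs feed MODULE_DEVICE_TABLE(mdio, ...) so userspace can autoload the module when a matching PHY ID is read off the bus. A hedged sketch of the second half, table name invented:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy.h>

/* A PHY matches when (phy_id & mask) equals the entry's id; masking off
 * the low nibble covers all DM9161B/C revisions.
 */
static struct mdio_device_id __maybe_unused example_davicom_tbl[] = {
	{ 0x0181b8b0, 0x0ffffff0 },	/* DM9161B/C */
	{ }
};
MODULE_DEVICE_TABLE(mdio, example_davicom_tbl);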
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index ef0b4eb15f8d..c7a12e2e07b7 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -113,17 +113,17 @@ static int dp83867_of_init(struct phy_device *phydev)
if (!phydev->dev.of_node)
return -ENODEV;
- ret = of_property_read_u32(of_node, "ti,rx_int_delay",
+ ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (ret)
return ret;
- ret = of_property_read_u32(of_node, "ti,tx_int_delay",
+ ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
if (ret)
return ret;
- ret = of_property_read_u32(of_node, "ti,fifo_depth",
+ ret = of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
if (ret)
return ret;
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 414fdf1f343f..fc7abc50b4f1 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -81,7 +81,13 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
return -ETIMEDOUT;
cmd = __raw_readl(priv->base + MDIO_CMD);
- if (cmd & MDIO_READ_FAIL)
+
+ /* Some broken devices are known not to release the line during
+ * turn-around, e.g: Broadcom BCM53125 external switches, so check for
+ * that condition here and ignore the MDIO controller read failure
+ * indication.
+ */
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
return -EIO;
return cmd & 0xffff;
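Editor's note on the unimac change: it reads bus->phy_ignore_ta_mask, a per-address bitmask on struct mii_bus that lets callers whitelist devices with the broken turn-around behaviour described in the comment. A hedged sketch of the producer side, assuming the mii_bus::phy_ignore_ta_mask field this series relies on and a driver that knows its switch pseudo-PHY address; the helper is hypothetical:

#include <linux/phy.h>

/* Hypothetical probe fragment: mark one PHY address so the MDIO
 * controller's read-failure indication is ignored for it.
 */
static void mark_broken_turnaround(struct mii_bus *bus, int phy_addr)
{
	bus->phy_ignore_ta_mask |= 1 << phy_addr;
}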
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ebdc357c5131..499185eaf413 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -288,9 +288,10 @@ static int kszphy_config_init(struct phy_device *phydev)
}
static int ksz9021_load_values_from_of(struct phy_device *phydev,
- struct device_node *of_node, u16 reg,
- char *field1, char *field2,
- char *field3, char *field4)
+ const struct device_node *of_node,
+ u16 reg,
+ const char *field1, const char *field2,
+ const char *field3, const char *field4)
{
int val1 = -1;
int val2 = -2;
@@ -336,8 +337,8 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
static int ksz9021_config_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
+ const struct device *dev = &phydev->dev;
+ const struct device_node *of_node = dev->of_node;
if (!of_node && dev->parent->of_node)
of_node = dev->parent->of_node;
@@ -365,6 +366,11 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define KSZ9031_PS_TO_REG 60
/* Extended registers */
+/* MMD Address 0x0 */
+#define MII_KSZ9031RN_FLP_BURST_TX_LO 3
+#define MII_KSZ9031RN_FLP_BURST_TX_HI 4
+
+/* MMD Address 0x2 */
#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
@@ -389,9 +395,9 @@ static int ksz9031_extended_read(struct phy_device *phydev,
}
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
- struct device_node *of_node,
+ const struct device_node *of_node,
u16 reg, size_t field_sz,
- char *field[], u8 numfields)
+ const char *field[], u8 numfields)
{
int val[4] = {-1, -2, -3, -4};
int matches = 0;
@@ -425,20 +431,36 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
}
+static int ksz9031_center_flp_timing(struct phy_device *phydev)
+{
+ int result;
+
+ /* Center KSZ9031RNX FLP timing at 16ms. */
+ result = ksz9031_extended_write(phydev, OP_DATA, 0,
+ MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+ if (!result)
+ result = ksz9031_extended_write(phydev, OP_DATA, 0,
+ MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+ if (result)
+ return result;
+
+ return genphy_restart_aneg(phydev);
+}
+
static int ksz9031_config_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
- char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
- char *rx_data_skews[4] = {
+ const struct device *dev = &phydev->dev;
+ const struct device_node *of_node = dev->of_node;
+ static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+ static const char *rx_data_skews[4] = {
"rxd0-skew-ps", "rxd1-skew-ps",
"rxd2-skew-ps", "rxd3-skew-ps"
};
- char *tx_data_skews[4] = {
+ static const char *tx_data_skews[4] = {
"txd0-skew-ps", "txd1-skew-ps",
"txd2-skew-ps", "txd3-skew-ps"
};
- char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+ static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
if (!of_node && dev->parent->of_node)
of_node = dev->parent->of_node;
@@ -460,7 +482,8 @@ static int ksz9031_config_init(struct phy_device *phydev)
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4);
}
- return 0;
+
+ return ksz9031_center_flp_timing(phydev);
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -519,7 +542,7 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
- struct device_node *np = phydev->dev.of_node;
+ const struct device_node *np = phydev->dev.of_node;
struct kszphy_priv *priv;
struct clk *clk;
int ret;
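Editor's note on the remaining micrel hunks: besides the FLP change they constify the DT lookup paths. The skew-name tables become static const char *, so they live in rodata and are built once instead of being re-materialized as stack arrays on every config_init call, and the device_node pointers are const-qualified to document read-only access. Sketch of the table shape, name invented:

/* Built once at compile time; previously four pointers were pushed
 * onto the stack on each call.
 */
static const char *example_rx_data_skews[4] = {
	"rxd0-skew-ps", "rxd1-skew-ps",
	"rxd2-skew-ps", "rxd3-skew-ps",
};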
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 14839bc0aaf5..686f37daa262 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -28,8 +28,6 @@
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index bcfa01add7cc..7193b7304fdd 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -517,7 +517,7 @@ static int cosa_probe(int base, int irq, int dma)
*/
set_current_state(TASK_INTERRUPTIBLE);
cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(30);
+ schedule_timeout(msecs_to_jiffies(300));
irq = probe_irq_off(irqs);
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
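Editor's note on the cosa.c change (and the dscc4 ones below): schedule_timeout() counts jiffies, so a literal 30 sleeps 30 ticks, which is 30 ms at HZ=1000 but 300 ms at HZ=100. Converting from milliseconds pins the intended wall-clock delay regardless of CONFIG_HZ. A minimal sketch:

#include <linux/jiffies.h>
#include <linux/sched.h>

/* Sleep ~300 ms regardless of the kernel's tick rate. */
static void sleep_300ms(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(300));
}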
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 08223569cebd..7a72407208b1 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -551,7 +551,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
msg, i);
goto done;
}
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
rmb();
} while (++i > 0);
netdev_err(dev, "%s timeout\n", msg);
@@ -596,7 +596,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
(dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
break;
smp_rmb();
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
} while (++i > 0);
return (i >= 0 ) ? i : -EAGAIN;
@@ -1033,7 +1033,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
/* Flush posted writes */
readl(ioaddr + GSTAR);
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
for (i = 0; i < 16; i++)
pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1046,7 +1046,6 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
static int dscc4_open(struct net_device *dev)
{
struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
- struct dscc4_pci_priv *ppriv;
int ret = -EAGAIN;
if ((dscc4_loopback_check(dpriv) < 0))
@@ -1055,8 +1054,6 @@ static int dscc4_open(struct net_device *dev)
if ((ret = hdlc_open(dev)))
goto err;
- ppriv = dpriv->pci_priv;
-
/*
* Due to various bugs, there is no way to reliably reset a
* specific port (manufacturer's dependent special PCI #RST wiring
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 2f5eda8a7227..6676607164d6 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -40,7 +40,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/stat.h>
-#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/lapb.h>
#include <linux/init.h>
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 8c283fcd843d..15f057ed41ad 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1373,9 +1373,9 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
ADM8211_CSR_READ(NAR);
if (priv->nar & ADM8211_NAR_PR)
- dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
else
- dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
adm8211_set_bssid(dev, bcast);
@@ -1861,8 +1861,8 @@ static int adm8211_probe(struct pci_dev *pdev,
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
- /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
- dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
+ /* dev->flags = RX_INCLUDES_FCS in promisc mode */
+ ieee80211_hw_set(dev, SIGNAL_UNSPEC);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
dev->max_signal = 100; /* FIXME: find better value */
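Editor's note: from here through the rest of the wireless drivers below (at76c50x, ath, iwlwifi, mac80211_hwsim, rt2x00, rtlwifi, wlcore, zd1211rw, ...), the hunks are one mechanical conversion. mac80211's hw->flags stopped being a u32 of OR-able IEEE80211_HW_* bits and became a bitmap sized by the number of hardware flags, so drivers set capabilities one at a time with ieee80211_hw_set() and test them with ieee80211_hw_check(); open-coded hw->flags |= ... assignments no longer compile. A hedged, simplified sketch of the accessors' shape (the in-tree versions in include/net/mac80211.h go through small type-checked inline wrappers):

/* Token-pasting the IEEE80211_HW_ prefix keeps call sites short:
 * ieee80211_hw_set(hw, SIGNAL_DBM) sets bit IEEE80211_HW_SIGNAL_DBM
 * in the hw->flags bitmap.
 */
#define ieee80211_hw_set(hw, flg) \
	__set_bit(IEEE80211_HW_##flg, (hw)->flags)
#define ieee80211_hw_check(hw, flg) \
	test_bit(IEEE80211_HW_##flg, (hw)->flags)

The few remaining flag clears (monitor-mode FCS handling in adm8211 and iwlwifi) use __clear_bit() on the same bitmap, as the hunks show.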
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 49219c508963..dab25136214a 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2360,8 +2360,8 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->hw->wiphy->max_scan_ie_len = 0;
priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC;
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
priv->hw->max_signal = 100;
SET_IEEE80211_DEV(priv->hw, &interface->dev);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 14937cbeca56..3b343c63aa52 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1682,9 +1682,9 @@ static int ar5523_probe(struct usb_interface *intf,
(id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
ar->vif = NULL;
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_HAS_RATE_CONTROL;
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
sizeof(struct ar5523_chunk);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c48c744acbcc..218b6af63447 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6836,21 +6836,21 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
- ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_AP_LINK_PS |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SW_CRYPTO_CONTROL |
- IEEE80211_HW_SUPPORT_FAST_XMIT |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_SUPPORTS_PER_STA_GTK |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_CHANCTX_STA_CSA |
- IEEE80211_HW_QUEUE_CONTROL;
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -6859,8 +6859,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
- ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
}
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a6131825c9f6..23552f43d125 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2537,12 +2537,12 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
/* Initialize driver private data */
SET_IEEE80211_DEV(hw, ah->dev);
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_RC_TABLE;
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 95690109ee11..39eaf9b6e9b4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -717,18 +717,18 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ath_common *common = ath9k_hw_common(priv->ah);
struct base_eep_header *pBase;
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
if (ath9k_ps_enable)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(hw, SUPPORTS_PS);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f8d11efa7b0f..eff0e5325e6a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -796,7 +796,7 @@ static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (!ath9k_is_chanctx_enabled())
return;
- hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
hw->queues = ATH9K_NUM_TX_QUEUES;
hw->offchannel_tx_hw_queue = hw->queues - 1;
hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
@@ -818,20 +818,20 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_RC_TABLE |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
if (ath9k_ps_enable)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(hw, SUPPORTS_PS);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
if (AR_SREV_9280_20_OR_LATER(ah))
hw->radiotap_mcs_details |=
@@ -839,7 +839,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
}
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 020cd46471f5..88045f93a76c 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -286,7 +286,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
}
if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
- ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
dev_err(&ar->udev->dev, "firmware does not provide "
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 59db6732d4e3..170c209f99b8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1844,22 +1844,22 @@ void *carl9170_alloc(size_t priv_size)
/* firmware decides which modes we support */
hw->wiphy->interface_modes = 0;
- hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
- IEEE80211_HW_SUPPORTS_RC_TABLE |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
if (!modparam_noht) {
/*
* see the comment above, why we allow the user
* to disable HT by a module parameter.
*/
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
}
hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 0783d2ed8238..900e72a089d8 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -944,12 +944,12 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
WLAN_CIPHER_SUITE_CCMP,
};
- wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
+ ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
+ ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index dbd894428be6..c9263e1c75d4 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -216,9 +216,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
sta_params->encrypt_type = priv_vif->encrypt_type;
- sta_params->short_preamble_supported =
- !(WCN36XX_FLAGS(wcn) &
- IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+ sta_params->short_preamble_supported = true;
sta_params->rifs_mode = 0;
sta_params->rmf = 0;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9556454da525..28490702124a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5609,8 +5609,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl = hw_to_b43_wl(hw);
/* fill hw info */
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM;
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 39d49d6cd07f..afc1fb3e38df 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3832,8 +3832,9 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
}
/* fill hw info */
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM;
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index b46cab250615..ab775a5d5b33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1060,10 +1060,9 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
*/
static int ieee_hw_init(struct ieee80211_hw *hw)
{
- hw->flags = IEEE80211_HW_SIGNAL_DBM
- /* | IEEE80211_HW_CONNECTION_MONITOR What is this? */
- | IEEE80211_HW_REPORTS_TX_ACK_STATUS
- | IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3689dbbd10bd..0e51e27d2e3f 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -278,14 +278,14 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
else
priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC;
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 189cdf58084b..7f4cb692cc57 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3561,8 +3561,10 @@ il3945_setup_mac(struct il_priv *il)
hw->vif_data_size = sizeof(struct il_vif_priv);
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index e4b175cbeefd..44fa422f255e 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5751,11 +5751,13 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->rate_control_algorithm = "iwl-4965-rs";
/* Tell mac80211 our characteristics */
- hw->flags =
- IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
if (il->cfg->sku & IL_SKU_N)
hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 852461ffe98f..7acaa266b704 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -104,16 +104,16 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->rate_control_algorithm = "iwl-agn-rs";
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_SUPPORT_FAST_XMIT |
- IEEE80211_HW_WANT_MONITOR_VIF;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -136,7 +136,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
*/
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
!iwlwifi_mod_params.sw_crypto)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->sta_data_size = sizeof(struct iwl_station_priv);
hw->vif_data_size = sizeof(struct iwl_vif_priv);
@@ -1342,9 +1342,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
* other interfaces are added, this is safe.
*/
if (vif->type == NL80211_IFTYPE_MONITOR)
- priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
else
- priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags);
err = iwl_setup_interface(priv, ctx);
if (!err || reset)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8088c7137f7c..1812dd018af2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -852,7 +852,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
MAC_FILTER_IN_BEACON |
MAC_FILTER_IN_PROBE_REQUEST |
MAC_FILTER_IN_CRC32);
- mvm->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -1270,7 +1270,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->uploaded = false;
if (vif->type == NL80211_IFTYPE_MONITOR)
- mvm->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 08367fbc3bc4..dfdab38e2d4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -423,19 +423,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
};
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TIMING_BEACON_ONLY |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_CHANCTX_STA_CSA |
- IEEE80211_HW_SUPPORT_FAST_XMIT |
- IEEE80211_HW_SUPPORTS_CLONED_SKBS;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -459,7 +459,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
*/
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
!iwlwifi_mod_params.sw_crypto) {
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++;
@@ -474,7 +474,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
@@ -2885,7 +2885,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
+ WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 1bdf18674fb8..a47f0acc099a 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -634,7 +634,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->tx_skb = NULL;
hw->queues = 1;
- hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
hw->extra_tx_headroom = sizeof(struct txpd);
memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8d2f6bbf9598..99e873dc8684 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1286,7 +1286,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (control->sta)
hwsim_check_sta_magic(control->sta);
- if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+ if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
@@ -1395,7 +1395,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
{
u32 _pid = ACCESS_ONCE(wmediumd_portid);
- if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
+ if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
txi->control.rates,
@@ -1432,7 +1432,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (skb == NULL)
return;
info = IEEE80211_SKB_CB(skb);
- if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+ if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(vif, NULL, skb,
info->control.rates,
ARRAY_SIZE(info->control.rates));
@@ -2391,16 +2391,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
if (param->p2p_device)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
- hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
- IEEE80211_HW_CHANCTX_STA_CSA |
- IEEE80211_HW_SUPPORT_FAST_XMIT;
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
if (rctbl)
- hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -2509,7 +2509,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
}
if (param->no_vif)
- hw->flags |= IEEE80211_HW_NO_AUTO_VIF;
+ ieee80211_hw_set(hw, NO_AUTO_VIF);
err = ieee80211_register_hw(hw);
if (err < 0) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 45eb0796a2e5..df3dd56199a7 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -594,11 +594,11 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
SET_IEEE80211_DEV(hw, dev->dev);
hw->queues = 4;
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SUPPORTS_RC_TABLE;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b71fc74d14ab..77361af68b18 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2380,7 +2380,7 @@ mwl8k_set_ht_caps(struct ieee80211_hw *hw,
if (cap & MWL8K_CAP_GREENFIELD)
band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
if (cap & MWL8K_CAP_AMPDU) {
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
}
@@ -5431,7 +5431,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 *addr = sta->addr, idx;
struct mwl8k_sta *sta_info = MWL8K_STA(sta);
- if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ if (!ieee80211_hw_check(hw, AMPDU_AGGREGATION))
return -ENOTSUPP;
spin_lock(&priv->stream_lock);
@@ -6076,14 +6076,15 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
hw->queues = MWL8K_TX_WMM_QUEUES;
/* Set rssi values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
/*
* Ask mac80211 not to trigger PS mode
* based on PM bit of incoming frames.
*/
if (priv->ap_fw)
- hw->flags |= IEEE80211_HW_AP_LINK_PS;
+ ieee80211_hw_set(hw, AP_LINK_PS);
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 2947ad21053c..7805864e76f9 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -746,12 +746,12 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
spin_lock_init(&priv->tx_stats_lock);
skb_queue_head_init(&priv->tx_queue);
skb_queue_head_init(&priv->tx_pending);
- dev->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ ieee80211_hw_set(dev, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(dev, MFP_CAPABLE);
+ ieee80211_hw_set(dev, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(dev, SUPPORTS_PS);
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+ ieee80211_hw_set(dev, SIGNAL_DBM);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index aeaf87bb5518..7e804324bfa7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1062,10 +1062,9 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->priv = adapter;
adapter->hw = hw;
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_AMPDU_AGGREGATION |
- 0;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
hw->queues = MAX_HW_QUEUES;
hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 7da138892026..9a3966cd6fbe 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1574,10 +1574,10 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 4ea53aa9ede3..1a6740b4d396 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1869,10 +1869,10 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 237bbb54c7a8..b50d873145d5 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1696,11 +1696,10 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
* multicast and broadcast traffic immediately instead of buffering it
* infinitely and thus dropping it after some time.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
/*
* Disable powersaving as default.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c7843769ab71..9524564f873b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7474,13 +7474,12 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
/*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -7490,8 +7489,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
* infinitely and thus dropping it after some time.
*/
if (!rt2x00_is_usb(rt2x00dev))
- rt2x00dev->hw->flags |=
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index c8a967247a9a..c0e730ea1b69 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2758,11 +2758,10 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 65ce3afb888a..7081e13b4fd6 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2105,16 +2105,15 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*
- * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
+ * Don't set IEEE80211_HOST_BROADCAST_PS_BUFFERING unless we are
* capable of sending the buffered frames out after the DTIM
* transmission using rt2x00lib_beacondone. This will send out
* multicast and broadcast traffic immediately instead of buffering it
* infinitely and thus dropping it after some time.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 706b844bce00..a43a16fde59d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1802,8 +1802,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
priv->band.n_bitrates = 4;
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+
dev->vif_data_size = sizeof(struct rtl8180_vif);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -1868,9 +1869,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
}
if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
- dev->flags |= IEEE80211_HW_SIGNAL_DBM;
+ ieee80211_hw_set(dev, SIGNAL_DBM);
else
- dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
+ ieee80211_hw_set(dev, SIGNAL_UNSPEC);
rtl8180_eeprom_read(priv);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 629ad8cfa17b..b7f72f9c7988 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1478,9 +1478,9 @@ static int rtl8187_probe(struct usb_interface *intf,
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+ ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(dev, SIGNAL_DBM);
/* Initialize rate-control variables */
dev->max_rates = 1;
dev->max_rate_tries = RETRY_COUNT;
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 01f56c7df8b5..0517a4f2d3f2 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -394,20 +394,18 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
}
}
/* <5> set hw caps */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_CONNECTION_MONITOR |
- /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
/* swlps or hwlps has been set in diff chip in init_sw_vars */
- if (rtlpriv->psc.swctrl_lps)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
- 0;
+ if (rtlpriv->psc.swctrl_lps) {
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ }
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index f238ee54226c..cd4777954f87 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1476,7 +1476,8 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(wl->hw, SIGNAL_DBM);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PS);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ef3fe0fff588..337223b9f6f8 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6060,19 +6060,19 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* FIXME: find a proper value */
wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
- wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_AP_LINK_PS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_CHANCTX_STA_CSA |
- IEEE80211_HW_SUPPORT_FAST_XMIT;
+ ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(wl->hw, AP_LINK_PS);
+ ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(wl->hw, SIGNAL_DBM);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PS);
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 89b6f69f09c8..e539d9b1b562 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1397,10 +1397,10 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_UNSPEC);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f1b2c1721917..5485f91294e7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -515,14 +515,9 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
- RING_IDX old_req_cons;
- RING_IDX ring_slots_used;
-
queue->last_rx_time = jiffies;
- old_req_cons = queue->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
- ring_slots_used = queue->rx.req_cons - old_req_cons;
__skb_queue_tail(&rxq, skb);
}
@@ -753,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
slots++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
txp->offset, txp->size);
xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
@@ -879,7 +874,7 @@ static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
- "Trying to overwrite active handle! pending_idx: %x\n",
+ "Trying to overwrite active handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
@@ -892,7 +887,7 @@ static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
- "Trying to unmap invalid handle! pending_idx: %x\n",
+ "Trying to unmap invalid handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
@@ -1248,7 +1243,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_err(queue->vif->dev,
- "txreq.offset: %x, size: %u, end: %lu\n",
+ "txreq.offset: %u, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
xenvif_fatal_tx_err(queue->vif);
@@ -1598,12 +1593,12 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
queue->pages_to_unmap,
gop - queue->tx_unmap_ops);
if (ret) {
- netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
gop - queue->tx_unmap_ops, ret);
for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
netdev_err(queue->vif->dev,
- " host_addr: %llx handle: %x status: %d\n",
+ " host_addr: 0x%llx handle: 0x%x status: %d\n",
gop[i].host_addr,
gop[i].handle,
gop[i].status);
@@ -1736,7 +1731,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
&queue->mmap_pages[pending_idx], 1);
if (ret) {
netdev_err(queue->vif->dev,
- "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+ "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
ret,
pending_idx,
tx_unmap_op.host_addr,
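The format-string fixes in this file follow a simple rule: gop - queue->tx_unmap_ops is a pointer difference of type ptrdiff_t, so it needs the 't' length modifier, while handles printed in hex read better with an explicit 0x prefix. A standalone sketch:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	int ops[8];
	int *gop = &ops[5];
	ptrdiff_t nr_ops = gop - ops;	/* pointer difference */
	unsigned int handle = 0x2a;

	/* 't' sizes the argument as ptrdiff_t; 0x%x makes hex explicit. */
	printf("nr_ops %tu handle: 0x%x\n", nr_ops, handle);
	return 0;
}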
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c89ca26e254d..56d8afd11077 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -733,7 +733,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
if (unlikely(rx->status < 0 ||
rx->offset + rx->status > PAGE_SIZE)) {
if (net_ratelimit())
- dev_warn(dev, "rx->offset: %x, size: %u\n",
+ dev_warn(dev, "rx->offset: %u, size: %d\n",
rx->offset, rx->status);
xennet_move_rx_slot(queue, skb, ref);
err = -EINVAL;
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 107714e4405f..722673cb785b 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -72,6 +72,6 @@ source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
source "drivers/nfc/nfcmrvl/Kconfig"
source "drivers/nfc/st21nfca/Kconfig"
-source "drivers/nfc/st21nfcb/Kconfig"
+source "drivers/nfc/st-nci/Kconfig"
source "drivers/nfc/nxp-nci/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index a4292d790f9b..368b6dfe71b3 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -12,7 +12,5 @@ obj-$(CONFIG_NFC_PORT100) += port100.o
obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
-obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb/
+obj-$(CONFIG_NFC_ST_NCI) += st-nci/
obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/
-
-ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 661e2c8143c4..daf352597ef8 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -211,7 +211,6 @@ flush:
static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct microread_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -220,8 +219,6 @@ static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
-
if (phy->hard_fault != 0)
return IRQ_HANDLED;
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
index 5e18afd9abe2..796be2411440 100644
--- a/drivers/nfc/nfcmrvl/Kconfig
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -21,3 +21,14 @@ config NFC_MRVL_USB
Say Y here to compile support for Marvell NFC-over-USB driver
into the kernel or say M to compile it as module.
+
+config NFC_MRVL_UART
+ tristate "Marvell NFC-over-UART driver"
+ depends on NFC_MRVL && NFC_NCI_UART
+ help
+ Marvell NFC-over-UART driver.
+
+ This driver provides support for Marvell NFC-over-UART devices.
+
+ Say Y here to compile support for the Marvell NFC-over-UART driver
+ into the kernel or say M to compile it as a module.
diff --git a/drivers/nfc/nfcmrvl/Makefile b/drivers/nfc/nfcmrvl/Makefile
index 97a0de72dc01..775196274d1f 100644
--- a/drivers/nfc/nfcmrvl/Makefile
+++ b/drivers/nfc/nfcmrvl/Makefile
@@ -7,3 +7,6 @@ obj-$(CONFIG_NFC_MRVL) += nfcmrvl.o
nfcmrvl_usb-y += usb.o
obj-$(CONFIG_NFC_MRVL_USB) += nfcmrvl_usb.o
+
+nfcmrvl_uart-y += uart.o
+obj-$(CONFIG_NFC_MRVL_UART) += nfcmrvl_uart.o
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index ad4933cefbd1..4a8866d62941 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -17,6 +17,9 @@
*/
#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
#include <linux/nfc.h>
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
@@ -63,20 +66,25 @@ static int nfcmrvl_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
if (!test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
return -EBUSY;
+ if (priv->config.hci_muxed) {
+ unsigned char *hdr;
+ unsigned char len = skb->len;
+
+ hdr = (unsigned char *) skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
+ hdr[0] = NFCMRVL_HCI_COMMAND_CODE;
+ hdr[1] = NFCMRVL_HCI_OGF;
+ hdr[2] = NFCMRVL_HCI_OCF;
+ hdr[3] = len;
+ }
+
return priv->if_ops->nci_send(priv, skb);
}
static int nfcmrvl_nci_setup(struct nci_dev *ndev)
{
- __u8 val;
-
- val = NFCMRVL_GPIO_PIN_NFC_NOT_ALLOWED;
- nci_set_config(ndev, NFCMRVL_NOT_ALLOWED_ID, 1, &val);
- val = NFCMRVL_GPIO_PIN_NFC_ACTIVE;
- nci_set_config(ndev, NFCMRVL_ACTIVE_ID, 1, &val);
- val = NFCMRVL_EXT_COEX_ENABLE;
- nci_set_config(ndev, NFCMRVL_EXT_COEX_ID, 1, &val);
+ __u8 val = 1;
+ nci_set_config(ndev, NFCMRVL_PB_BAIL_OUT, 1, &val);
return 0;
}
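When hci_muxed is set, nfcmrvl_nci_send() above prepends a fixed 4-byte HCI command header to each outgoing NCI packet. A standalone sketch of the framing, with constants copied from nfcmrvl.h below; the sample payload is a hypothetical NCI core-reset command:

#include <stdio.h>
#include <string.h>

#define NFCMRVL_HCI_EVENT_HEADER_SIZE	4
#define NFCMRVL_HCI_COMMAND_CODE	0x01
#define NFCMRVL_HCI_OGF			0x81
#define NFCMRVL_HCI_OCF			0xFE

/* Prepend the HCI mux header in front of an NCI payload, as the driver
 * does with skb_push() in nfcmrvl_nci_send(). */
static size_t hci_mux_wrap(unsigned char *out, const unsigned char *nci,
			   size_t len)
{
	out[0] = NFCMRVL_HCI_COMMAND_CODE;
	out[1] = NFCMRVL_HCI_OGF;
	out[2] = NFCMRVL_HCI_OCF;
	out[3] = (unsigned char)len;	/* NCI payload length */
	memcpy(out + NFCMRVL_HCI_EVENT_HEADER_SIZE, nci, len);
	return NFCMRVL_HCI_EVENT_HEADER_SIZE + len;
}

int main(void)
{
	unsigned char nci[] = { 0x20, 0x00, 0x01, 0x00 };
	unsigned char frame[64];
	size_t i, n = hci_mux_wrap(frame, nci, sizeof(nci));

	for (i = 0; i < n; i++)
		printf("%02x ", (unsigned)frame[i]);
	printf("\n");
	return 0;
}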
@@ -88,11 +96,13 @@ static struct nci_ops nfcmrvl_nci_ops = {
};
struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
- struct nfcmrvl_if_ops *ops,
- struct device *dev)
+ struct nfcmrvl_if_ops *ops,
+ struct device *dev,
+ struct nfcmrvl_platform_data *pdata)
{
struct nfcmrvl_private *priv;
int rc;
+ int headroom = 0;
u32 protocols;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -103,13 +113,30 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
priv->if_ops = ops;
priv->dev = dev;
+ memcpy(&priv->config, pdata, sizeof(*pdata));
+
+ if (priv->config.reset_n_io) {
+ rc = devm_gpio_request_one(dev,
+ priv->config.reset_n_io,
+ GPIOF_OUT_INIT_LOW,
+ "nfcmrvl_reset_n");
+ if (rc < 0)
+ nfc_err(dev, "failed to request reset_n io\n");
+ }
+
+ if (priv->config.hci_muxed)
+ headroom = NFCMRVL_HCI_EVENT_HEADER_SIZE;
+
protocols = NFC_PROTO_JEWEL_MASK
- | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_MIFARE_MASK
+ | NFC_PROTO_FELICA_MASK
| NFC_PROTO_ISO14443_MASK
| NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_ISO15693_MASK
| NFC_PROTO_NFC_DEP_MASK;
- priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, 0, 0);
+ priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols,
+ headroom, 0);
if (!priv->ndev) {
nfc_err(dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
@@ -118,6 +145,8 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
nci_set_drvdata(priv->ndev, priv);
+ nfcmrvl_chip_reset(priv);
+
rc = nci_register_device(priv->ndev);
if (rc) {
nfc_err(dev, "nci_register_device failed %d\n", rc);
@@ -144,21 +173,84 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
}
EXPORT_SYMBOL_GPL(nfcmrvl_nci_unregister_dev);
-int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count)
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
- skb = nci_skb_alloc(priv->ndev, count, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ if (priv->config.hci_muxed) {
+ if (skb->data[0] == NFCMRVL_HCI_EVENT_CODE &&
+ skb->data[1] == NFCMRVL_HCI_NFC_EVENT_CODE) {
+ /* Data packet, let's extract NCI payload */
+ skb_pull(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
+ } else {
+ /* Skip this packet */
+ kfree_skb(skb);
+ return 0;
+ }
+ }
- memcpy(skb_put(skb, count), data, count);
- nci_recv_frame(priv->ndev, skb);
+ if (test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ nci_recv_frame(priv->ndev, skb);
+ else {
+ /* Drop this packet since nobody wants it */
+ kfree_skb(skb);
+ return 0;
+ }
- return count;
+ return 0;
}
EXPORT_SYMBOL_GPL(nfcmrvl_nci_recv_frame);
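On receive, nfcmrvl_nci_recv_frame() above only hands a muxed frame to the NCI core after checking the two event-code bytes and stripping the 4-byte header. A standalone sketch of that demux (constants copied from nfcmrvl.h; the length check is an addition of this sketch):

#include <stdio.h>
#include <stddef.h>

#define NFCMRVL_HCI_EVENT_HEADER_SIZE	4
#define NFCMRVL_HCI_EVENT_CODE		0x04
#define NFCMRVL_HCI_NFC_EVENT_CODE	0xFF

/* Return the NCI payload inside a muxed frame, or NULL when the frame
 * should be skipped, mirroring the checks in nfcmrvl_nci_recv_frame(). */
static const unsigned char *hci_mux_unwrap(const unsigned char *frame,
					   size_t len, size_t *out_len)
{
	if (len < NFCMRVL_HCI_EVENT_HEADER_SIZE ||
	    frame[0] != NFCMRVL_HCI_EVENT_CODE ||
	    frame[1] != NFCMRVL_HCI_NFC_EVENT_CODE)
		return NULL;
	*out_len = len - NFCMRVL_HCI_EVENT_HEADER_SIZE;
	return frame + NFCMRVL_HCI_EVENT_HEADER_SIZE;
}

int main(void)
{
	const unsigned char frame[] = { 0x04, 0xFF, 0x00, 0x04,
					0x40, 0x00, 0x01, 0x00 };
	size_t n;
	const unsigned char *nci = hci_mux_unwrap(frame, sizeof(frame), &n);

	if (nci)
		printf("NCI payload: %zu bytes, first byte 0x%02x\n", n, nci[0]);
	return 0;
}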
+void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
+{
+ /*
+ * This function does not check whether the device is in use.
+ * To be improved.
+ */
+
+ if (priv->config.reset_n_io) {
+ nfc_info(priv->dev, "reset the chip\n");
+ gpio_set_value(priv->config.reset_n_io, 0);
+ usleep_range(5000, 10000);
+ gpio_set_value(priv->config.reset_n_io, 1);
+ } else
+ nfc_info(priv->dev, "no reset available on this interface\n");
+}
+
+#ifdef CONFIG_OF
+
+int nfcmrvl_parse_dt(struct device_node *node,
+ struct nfcmrvl_platform_data *pdata)
+{
+ int reset_n_io;
+
+ reset_n_io = of_get_named_gpio(node, "reset-n-io", 0);
+ if (reset_n_io < 0) {
+ pr_info("no reset-n-io config\n");
+ reset_n_io = 0;
+ } else if (!gpio_is_valid(reset_n_io)) {
+ pr_err("invalid reset-n-io GPIO\n");
+ return reset_n_io;
+ }
+ pdata->reset_n_io = reset_n_io;
+
+ if (of_find_property(node, "hci-muxed", NULL))
+ pdata->hci_muxed = 1;
+ else
+ pdata->hci_muxed = 0;
+
+ return 0;
+}
+
+#else
+
+int nfcmrvl_parse_dt(struct device_node *node,
+ struct nfcmrvl_platform_data *pdata)
+{
+ return -ENODEV;
+}
+
+#endif
+EXPORT_SYMBOL_GPL(nfcmrvl_parse_dt);
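As the parser above implies, the generic binding consumes an optional "reset-n-io" GPIO specifier and a boolean "hci-muxed" property. Note that an absent GPIO is recorded as 0 and later tested with a plain truth check, so this code cannot express "use GPIO 0" -- a known limitation of legacy integer GPIO numbering.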
+
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell NFC driver ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index 54c4a956bd45..e5a7e5464f2e 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -16,6 +16,11 @@
* this warranty disclaimer.
**/
+#ifndef _NFCMRVL_H_
+#define _NFCMRVL_H_
+
+#include <linux/platform_data/nfcmrvl.h>
+
/* Define private flags: */
#define NFCMRVL_NCI_RUNNING 1
@@ -27,11 +32,49 @@
#define NFCMRVL_GPIO_PIN_NFC_ACTIVE 0xB
#define NFCMRVL_NCI_MAX_EVENT_SIZE 260
+/*
+** NCI FW Parameters
+*/
+
+#define NFCMRVL_PB_BAIL_OUT 0x11
+
+/*
+** HCI defines
+*/
+
+#define NFCMRVL_HCI_EVENT_HEADER_SIZE 0x04
+#define NFCMRVL_HCI_EVENT_CODE 0x04
+#define NFCMRVL_HCI_NFC_EVENT_CODE 0xFF
+#define NFCMRVL_HCI_COMMAND_CODE 0x01
+#define NFCMRVL_HCI_OGF 0x81
+#define NFCMRVL_HCI_OCF 0xFE
+
+enum nfcmrvl_phy {
+ NFCMRVL_PHY_USB = 0,
+ NFCMRVL_PHY_UART = 1,
+};
+
+
struct nfcmrvl_private {
- struct nci_dev *ndev;
+
unsigned long flags;
+
+ /* Platform configuration */
+ struct nfcmrvl_platform_data config;
+
+ struct nci_dev *ndev;
+
+ /*
+ ** PHY related information
+ */
+
+ /* PHY driver context */
void *drv_data;
+ /* PHY device */
struct device *dev;
+ /* PHY type */
+ enum nfcmrvl_phy phy;
+ /* Low level driver ops */
struct nfcmrvl_if_ops *if_ops;
};
@@ -42,7 +85,16 @@ struct nfcmrvl_if_ops {
};
void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
-int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count);
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb);
struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
- struct nfcmrvl_if_ops *ops,
- struct device *dev);
+ struct nfcmrvl_if_ops *ops,
+ struct device *dev,
+ struct nfcmrvl_platform_data *pdata);
+
+
+void nfcmrvl_chip_reset(struct nfcmrvl_private *priv);
+
+int nfcmrvl_parse_dt(struct device_node *node,
+ struct nfcmrvl_platform_data *pdata);
+
+#endif
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
new file mode 100644
index 000000000000..61442d6528a6
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -0,0 +1,225 @@
+/**
+ * Marvell NFC-over-UART driver
+ *
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+static unsigned int hci_muxed;
+static unsigned int flow_control;
+static unsigned int break_control;
+static unsigned int reset_n_io;
+
+/*
+** NFCMRVL NCI OPS
+*/
+
+static int nfcmrvl_uart_nci_open(struct nfcmrvl_private *priv)
+{
+ return 0;
+}
+
+static int nfcmrvl_uart_nci_close(struct nfcmrvl_private *priv)
+{
+ return 0;
+}
+
+static int nfcmrvl_uart_nci_send(struct nfcmrvl_private *priv,
+ struct sk_buff *skb)
+{
+ struct nci_uart *nu = priv->drv_data;
+
+ return nu->ops.send(nu, skb);
+}
+
+static struct nfcmrvl_if_ops uart_ops = {
+ .nci_open = nfcmrvl_uart_nci_open,
+ .nci_close = nfcmrvl_uart_nci_close,
+ .nci_send = nfcmrvl_uart_nci_send,
+};
+
+#ifdef CONFIG_OF
+
+static int nfcmrvl_uart_parse_dt(struct device_node *node,
+ struct nfcmrvl_platform_data *pdata)
+{
+ struct device_node *matched_node;
+ int ret;
+
+ matched_node = of_find_compatible_node(node, NULL, "mrvl,nfc-uart");
+ if (!matched_node)
+ return -ENODEV;
+
+ ret = nfcmrvl_parse_dt(matched_node, pdata);
+ if (ret < 0) {
+ pr_err("Failed to get generic entries\n");
+ return ret;
+ }
+
+ if (of_find_property(matched_node, "flow-control", NULL))
+ pdata->flow_control = 1;
+ else
+ pdata->flow_control = 0;
+
+ if (of_find_property(matched_node, "break-control", NULL))
+ pdata->break_control = 1;
+ else
+ pdata->break_control = 0;
+
+ return 0;
+}
+
+#else
+
+static int nfcmrvl_uart_parse_dt(struct device_node *node,
+ struct nfcmrvl_platform_data *pdata)
+{
+ return -ENODEV;
+}
+
+#endif
+
+/*
+** NCI UART OPS
+*/
+
+static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
+{
+ struct nfcmrvl_private *priv;
+ struct nfcmrvl_platform_data *pdata = NULL;
+ struct nfcmrvl_platform_data config;
+
+ /*
+ * Platform data cannot be used here, since it is usually already
+ * owned by the low-level serial driver. Instead, try to retrieve
+ * the serial device and check whether DT entries were added.
+ */
+
+ if (nu->tty->dev->parent && nu->tty->dev->parent->of_node)
+ if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node,
+ &config) == 0)
+ pdata = &config;
+
+ if (!pdata) {
+ pr_info("No platform data / DT -> fallback to module params\n");
+ config.hci_muxed = hci_muxed;
+ config.reset_n_io = reset_n_io;
+ config.flow_control = flow_control;
+ config.break_control = break_control;
+ pdata = &config;
+ }
+
+ priv = nfcmrvl_nci_register_dev(nu, &uart_ops, nu->tty->dev, pdata);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->phy = NFCMRVL_PHY_UART;
+
+ nu->drv_data = priv;
+ nu->ndev = priv->ndev;
+
+ /* Set BREAK */
+ if (priv->config.break_control && nu->tty->ops->break_ctl)
+ nu->tty->ops->break_ctl(nu->tty, -1);
+
+ return 0;
+}
+
+static void nfcmrvl_nci_uart_close(struct nci_uart *nu)
+{
+ nfcmrvl_nci_unregister_dev((struct nfcmrvl_private *)nu->drv_data);
+}
+
+static int nfcmrvl_nci_uart_recv(struct nci_uart *nu, struct sk_buff *skb)
+{
+ return nfcmrvl_nci_recv_frame((struct nfcmrvl_private *)nu->drv_data,
+ skb);
+}
+
+static void nfcmrvl_nci_uart_tx_start(struct nci_uart *nu)
+{
+ struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data;
+
+ /* Remove BREAK to wake up the NFCC */
+ if (priv->config.break_control && nu->tty->ops->break_ctl) {
+ nu->tty->ops->break_ctl(nu->tty, 0);
+ usleep_range(3000, 5000);
+ }
+}
+
+static void nfcmrvl_nci_uart_tx_done(struct nci_uart *nu)
+{
+ struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data;
+
+ /*
+ ** To ensure that we can wake the NFCC up if it goes into DEEP SLEEP
+ ** state, we set BREAK. Once we are ready to send again, we remove
+ ** it.
+ */
+ if (priv->config.break_control && nu->tty->ops->break_ctl)
+ nu->tty->ops->break_ctl(nu->tty, -1);
+}
+
+static struct nci_uart nfcmrvl_nci_uart = {
+ .owner = THIS_MODULE,
+ .name = "nfcmrvl_uart",
+ .driver = NCI_UART_DRIVER_MARVELL,
+ .ops = {
+ .open = nfcmrvl_nci_uart_open,
+ .close = nfcmrvl_nci_uart_close,
+ .recv = nfcmrvl_nci_uart_recv,
+ .tx_start = nfcmrvl_nci_uart_tx_start,
+ .tx_done = nfcmrvl_nci_uart_tx_done,
+ }
+};
+
+/*
+** Module init
+*/
+
+static int nfcmrvl_uart_init_module(void)
+{
+ return nci_uart_register(&nfcmrvl_nci_uart);
+}
+
+static void nfcmrvl_uart_exit_module(void)
+{
+ nci_uart_unregister(&nfcmrvl_nci_uart);
+}
+
+module_init(nfcmrvl_uart_init_module);
+module_exit(nfcmrvl_uart_exit_module);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC-over-UART");
+MODULE_LICENSE("GPL v2");
+
+module_param(flow_control, uint, 0);
+MODULE_PARM_DESC(flow_control, "Tell if UART needs flow control at init.");
+
+module_param(break_control, uint, 0);
+MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
+
+module_param(hci_muxed, uint, 0);
+MODULE_PARM_DESC(hci_muxed, "Tell if the NCI transport is muxed inside HCI frames.");
+
+module_param(reset_n_io, uint, 0);
+MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 6cf15c1a2618..7d1fe436c9f6 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -26,7 +26,8 @@
#define VERSION "1.0"
static struct usb_device_id nfcmrvl_table[] = {
- { USB_DEVICE_INTERFACE_CLASS(0x1286, 0x2046, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1286, 0x2046,
+ USB_CLASS_VENDOR_SPEC, 4, 1) },
{ } /* Terminating entry */
};
@@ -69,18 +70,27 @@ static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data)
static void nfcmrvl_bulk_complete(struct urb *urb)
{
struct nfcmrvl_usb_drv_data *drv_data = urb->context;
+ struct sk_buff *skb;
int err;
- dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d",
+ dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d\n",
urb, urb->status, urb->actual_length);
if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags))
return;
if (!urb->status) {
- if (nfcmrvl_nci_recv_frame(drv_data->priv, urb->transfer_buffer,
- urb->actual_length) < 0)
- nfc_err(&drv_data->udev->dev, "corrupted Rx packet\n");
+ skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length,
+ GFP_ATOMIC);
+ if (!skb) {
+ nfc_err(&drv_data->udev->dev, "failed to alloc mem\n");
+ } else {
+ memcpy(skb_put(skb, urb->actual_length),
+ urb->transfer_buffer, urb->actual_length);
+ if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
+ nfc_err(&drv_data->udev->dev,
+ "corrupted Rx packet\n");
+ }
}
if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags))
@@ -292,6 +302,10 @@ static int nfcmrvl_probe(struct usb_interface *intf,
struct nfcmrvl_private *priv;
int i;
struct usb_device *udev = interface_to_usbdev(intf);
+ struct nfcmrvl_platform_data config;
+
+ /* No configuration for USB */
+ memset(&config, 0, sizeof(config));
nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
@@ -329,11 +343,12 @@ static int nfcmrvl_probe(struct usb_interface *intf,
init_usb_anchor(&drv_data->deferred);
priv = nfcmrvl_nci_register_dev(drv_data, &usb_ops,
- &drv_data->udev->dev);
+ &drv_data->udev->dev, &config);
if (IS_ERR(priv))
return PTR_ERR(priv);
drv_data->priv = priv;
+ drv_data->priv->phy = NFCMRVL_PHY_USB;
priv->dev = &drv_data->udev->dev;
usb_set_intfdata(intf, drv_data);
diff --git a/drivers/nfc/nxp-nci/Makefile b/drivers/nfc/nxp-nci/Makefile
index c008be30bb18..c9ec7869dbd2 100644
--- a/drivers/nfc/nxp-nci/Makefile
+++ b/drivers/nfc/nxp-nci/Makefile
@@ -7,5 +7,3 @@ nxp-nci_i2c-objs = i2c.o
obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci.o
obj-$(CONFIG_NFC_NXP_NCI_I2C) += nxp-nci_i2c.o
-
-ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 17bd67dbebf0..2f77f1d03638 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -2,8 +2,10 @@
* I2C link layer for the NXP NCI driver
*
* Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ * Copyright (C) 2012-2015 Intel Corporation. All rights reserved.
*
* Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ * Authors: Oleg Zhurakivskyy <oleg.zhurakivskyy@intel.com>
*
* Derived from PN544 device driver:
* Copyright (C) 2012 Intel Corporation. All rights reserved.
@@ -23,12 +25,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/nfc.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/platform_data/nxp-nci.h>
@@ -48,6 +52,7 @@ struct nxp_nci_i2c_phy {
unsigned int gpio_en;
unsigned int gpio_fw;
+ unsigned int gpio_irq;
int hard_fault; /*
* < 0 if hardware error occurred (e.g. i2c err)
@@ -308,6 +313,37 @@ static int nxp_nci_i2c_parse_devtree(struct i2c_client *client)
#endif
+static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy)
+{
+ struct i2c_client *client = phy->i2c_dev;
+ struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq;
+
+ gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2);
+ gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1);
+ gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0);
+
+ if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) {
+ nfc_err(&client->dev, "No GPIOs\n");
+ return -EINVAL;
+ }
+
+ gpiod_direction_output(gpiod_en, 0);
+ gpiod_direction_output(gpiod_fw, 0);
+ gpiod_direction_input(gpiod_irq);
+
+ client->irq = gpiod_to_irq(gpiod_irq);
+ if (client->irq < 0) {
+ nfc_err(&client->dev, "No IRQ\n");
+ return -EINVAL;
+ }
+
+ phy->gpio_en = desc_to_gpio(gpiod_en);
+ phy->gpio_fw = desc_to_gpio(gpiod_fw);
+ phy->gpio_irq = desc_to_gpio(gpiod_irq);
+
+ return 0;
+}
+
static int nxp_nci_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -343,6 +379,11 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
phy->gpio_en = pdata->gpio_en;
phy->gpio_fw = pdata->gpio_fw;
client->irq = pdata->irq;
+ } else if (ACPI_HANDLE(&client->dev)) {
+ r = nxp_nci_i2c_acpi_config(phy);
+ if (r < 0)
+ goto probe_exit;
+ goto nci_probe;
} else {
nfc_err(&client->dev, "No platform data\n");
r = -EINVAL;
@@ -359,6 +400,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
if (r < 0)
goto probe_exit;
+nci_probe:
r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops,
NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev);
if (r < 0)
@@ -397,10 +439,19 @@ static const struct of_device_id of_nxp_nci_i2c_match[] = {
};
MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
+#ifdef CONFIG_ACPI
+static struct acpi_device_id acpi_id[] = {
+ { "NXP7471" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_id);
+#endif
+
static struct i2c_driver nxp_nci_i2c_driver = {
.driver = {
.name = NXP_NCI_I2C_DRIVER_NAME,
.owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(acpi_id),
.of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
},
.probe = nxp_nci_i2c_probe,
@@ -413,3 +464,4 @@ module_i2c_driver(nxp_nci_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("I2C driver for NXP NCI NFC controllers");
MODULE_AUTHOR("Clément Perrochaud <clement.perrochaud@nxp.com>");
+MODULE_AUTHOR("Oleg Zhurakivskyy <oleg.zhurakivskyy@intel.com>");
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 6fd986f5ac3e..fa75c53f3fa5 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -895,56 +895,35 @@ static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client)
return -ENODEV;
/* Get EN GPIO from ACPI */
- gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1);
+ gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1,
+ GPIOD_OUT_LOW);
if (IS_ERR(gpiod_en)) {
- nfc_err(dev,
- "Unable to get EN GPIO\n");
+ nfc_err(dev, "Unable to get EN GPIO\n");
return -ENODEV;
}
- phy->gpio_en = desc_to_gpio(gpiod_en);
-
- /* Configuration EN GPIO */
- ret = gpiod_direction_output(gpiod_en, 0);
- if (ret) {
- nfc_err(dev, "Fail EN pin direction\n");
- return ret;
- }
+ phy->gpio_en = desc_to_gpio(gpiod_en);
/* Get FW GPIO from ACPI */
- gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2);
+ gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2,
+ GPIOD_OUT_LOW);
if (IS_ERR(gpiod_fw)) {
- nfc_err(dev,
- "Unable to get FW GPIO\n");
+ nfc_err(dev, "Unable to get FW GPIO\n");
return -ENODEV;
}
- phy->gpio_fw = desc_to_gpio(gpiod_fw);
-
- /* Configuration FW GPIO */
- ret = gpiod_direction_output(gpiod_fw, 0);
- if (ret) {
- nfc_err(dev, "Fail FW pin direction\n");
- return ret;
- }
+ phy->gpio_fw = desc_to_gpio(gpiod_fw);
/* Get IRQ GPIO */
- gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0);
+ gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0,
+ GPIOD_IN);
if (IS_ERR(gpiod_irq)) {
- nfc_err(dev,
- "Unable to get IRQ GPIO\n");
+ nfc_err(dev, "Unable to get IRQ GPIO\n");
return -ENODEV;
}
phy->gpio_irq = desc_to_gpio(gpiod_irq);
- /* Configure IRQ GPIO */
- ret = gpiod_direction_input(gpiod_irq);
- if (ret) {
- nfc_err(dev, "Fail IRQ pin direction\n");
- return ret;
- }
-
/* Map the pin to an IRQ */
ret = gpiod_to_irq(gpiod_irq);
if (ret < 0) {
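The pn544 rework above folds direction setup into the request itself: the fourth devm_gpiod_get_index() argument (GPIOD_OUT_LOW / GPIOD_IN) replaces the separate gpiod_direction_output()/gpiod_direction_input() calls. A condensed sketch of the resulting pattern, with names as in the hunk and error handling abbreviated:

	struct gpio_desc *gpiod_en;

	/* Request EN as an output driven low in one call; no follow-up
	 * gpiod_direction_output() is needed. */
	gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1,
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod_en))
		return -ENODEV;
	phy->gpio_en = desc_to_gpio(gpiod_en);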
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
new file mode 100644
index 000000000000..fc3904c946ee
--- /dev/null
+++ b/drivers/nfc/st-nci/Kconfig
@@ -0,0 +1,23 @@
+config NFC_ST_NCI
+ tristate "STMicroelectronics ST NCI NFC driver"
+ depends on NFC_NCI
+ default n
+ ---help---
+ STMicroelectronics NFC NCI chips core driver. It implements the chipset
+ NCI logic and hooks into the NFC kernel APIs. Physical layers will
+ register against it.
+
+ To compile this driver as a module, choose M here. The module will
+ be called st-nci.
+ Say N if unsure.
+
+config NFC_ST_NCI_I2C
+ tristate "NFC ST NCI i2c support"
+ depends on NFC_ST_NCI && I2C
+ ---help---
+ This module adds support for an I2C interface to the
+ STMicroelectronics NFC NCI chips family.
+ Select this if your platform is using the i2c bus.
+
+ If you choose to build a module, it'll be called st-nci_i2c.
+ Say N if unsure.
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
new file mode 100644
index 000000000000..0df157df3a94
--- /dev/null
+++ b/drivers/nfc/st-nci/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for ST NCI based NFC driver
+#
+
+st-nci-objs = ndlc.o core.o st-nci_se.o
+obj-$(CONFIG_NFC_ST_NCI) += st-nci.o
+
+st-nci_i2c-objs = i2c.o
+obj-$(CONFIG_NFC_ST_NCI_I2C) += st-nci_i2c.o
diff --git a/drivers/nfc/st-nci/core.c b/drivers/nfc/st-nci/core.c
new file mode 100644
index 000000000000..c419d3943973
--- /dev/null
+++ b/drivers/nfc/st-nci/core.c
@@ -0,0 +1,179 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "st-nci.h"
+#include "st-nci_se.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+
+#define ST_NCI1_X_PROPRIETARY_ISO15693 0x83
+
+static int st_nci_init(struct nci_dev *ndev)
+{
+ struct nci_mode_set_cmd cmd;
+
+ cmd.cmd_type = ST_NCI_SET_NFC_MODE;
+ cmd.mode = 1;
+
+ return nci_prop_cmd(ndev, ST_NCI_CORE_PROP,
+ sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd);
+}
+
+static int st_nci_open(struct nci_dev *ndev)
+{
+ struct st_nci_info *info = nci_get_drvdata(ndev);
+ int r;
+
+ if (test_and_set_bit(ST_NCI_RUNNING, &info->flags))
+ return 0;
+
+ r = ndlc_open(info->ndlc);
+ if (r)
+ clear_bit(ST_NCI_RUNNING, &info->flags);
+
+ return r;
+}
+
+static int st_nci_close(struct nci_dev *ndev)
+{
+ struct st_nci_info *info = nci_get_drvdata(ndev);
+
+ if (!test_bit(ST_NCI_RUNNING, &info->flags))
+ return 0;
+
+ ndlc_close(info->ndlc);
+
+ clear_bit(ST_NCI_RUNNING, &info->flags);
+
+ return 0;
+}
+
+static int st_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct st_nci_info *info = nci_get_drvdata(ndev);
+
+ skb->dev = (void *)ndev;
+
+ if (!test_bit(ST_NCI_RUNNING, &info->flags))
+ return -EBUSY;
+
+ return ndlc_send(info->ndlc, skb);
+}
+
+static __u32 st_nci_get_rfprotocol(struct nci_dev *ndev,
+ __u8 rf_protocol)
+{
+ return rf_protocol == ST_NCI1_X_PROPRIETARY_ISO15693 ?
+ NFC_PROTO_ISO15693_MASK : 0;
+}
+
+static int st_nci_prop_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nci_req_complete(ndev, status);
+ return 0;
+}
+
+static struct nci_prop_ops st_nci_prop_ops[] = {
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ ST_NCI_CORE_PROP),
+ .rsp = st_nci_prop_rsp_packet,
+ },
+};
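The NCI core routes a proprietary response to st_nci_prop_rsp_packet() by matching the packet opcode against this table. The opcode is a 16-bit value packing the group ID into the high byte and the opcode ID into the low bits; a standalone sketch of the packing, mirroring my reading of the kernel's nci_opcode_pack():

#include <stdio.h>
#include <stdint.h>

#define NCI_GID_PROPRIETARY	0x0f
#define ST_NCI_CORE_PROP	0x01

/* GID is masked to 4 bits, OID to 6 bits, as in nci_opcode_pack(). */
static uint16_t opcode_pack(uint8_t gid, uint8_t oid)
{
	return (uint16_t)(((gid & 0x0f) << 8) | (oid & 0x3f));
}

int main(void)
{
	printf("proprietary opcode = 0x%04x\n",
	       opcode_pack(NCI_GID_PROPRIETARY, ST_NCI_CORE_PROP));
	return 0;
}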
+
+static struct nci_ops st_nci_ops = {
+ .init = st_nci_init,
+ .open = st_nci_open,
+ .close = st_nci_close,
+ .send = st_nci_send,
+ .get_rfprotocol = st_nci_get_rfprotocol,
+ .discover_se = st_nci_discover_se,
+ .enable_se = st_nci_enable_se,
+ .disable_se = st_nci_disable_se,
+ .se_io = st_nci_se_io,
+ .hci_load_session = st_nci_hci_load_session,
+ .hci_event_received = st_nci_hci_event_received,
+ .hci_cmd_received = st_nci_hci_cmd_received,
+ .prop_ops = st_nci_prop_ops,
+ .n_prop_ops = ARRAY_SIZE(st_nci_prop_ops),
+};
+
+int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
+ int phy_tailroom)
+{
+ struct st_nci_info *info;
+ int r;
+ u32 protocols;
+
+ info = devm_kzalloc(ndlc->dev,
+ sizeof(struct st_nci_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK
+ | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_ISO15693_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ ndlc->ndev = nci_allocate_device(&st_nci_ops, protocols,
+ phy_headroom, phy_tailroom);
+ if (!ndlc->ndev) {
+ pr_err("Cannot allocate nfc ndev\n");
+ return -ENOMEM;
+ }
+ info->ndlc = ndlc;
+
+ nci_set_drvdata(ndlc->ndev, info);
+
+ r = nci_register_device(ndlc->ndev);
+ if (r) {
+ pr_err("Cannot register nfc device to nci core\n");
+ nci_free_device(ndlc->ndev);
+ return r;
+ }
+
+ return st_nci_se_init(ndlc->ndev);
+}
+EXPORT_SYMBOL_GPL(st_nci_probe);
+
+void st_nci_remove(struct nci_dev *ndev)
+{
+ struct st_nci_info *info = nci_get_drvdata(ndev);
+
+ ndlc_close(info->ndlc);
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+}
+EXPORT_SYMBOL_GPL(st_nci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st-nci/i2c.c
index 76a4cad41cec..06175ce769bb 100644
--- a/drivers/nfc/st21nfcb/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -1,6 +1,6 @@
/*
- * I2C Link Layer for ST21NFCB NCI based Driver
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * I2C Link Layer for ST NCI NFC controller family based Driver
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/nfc.h>
-#include <linux/platform_data/st21nfcb.h>
+#include <linux/platform_data/st_nci.h>
#include "ndlc.h"
@@ -35,25 +35,23 @@
#define ST21NFCB_FRAME_HEADROOM 1
#define ST21NFCB_FRAME_TAILROOM 0
-#define ST21NFCB_NCI_I2C_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */
-#define ST21NFCB_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
+#define ST_NCI_I2C_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */
+#define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
-#define ST21NFCB_NCI_I2C_DRIVER_NAME "st21nfcb_nci_i2c"
+#define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c"
-static struct i2c_device_id st21nfcb_nci_i2c_id_table[] = {
- {ST21NFCB_NCI_DRIVER_NAME, 0},
+static struct i2c_device_id st_nci_i2c_id_table[] = {
+ {ST_NCI_DRIVER_NAME, 0},
{}
};
-MODULE_DEVICE_TABLE(i2c, st21nfcb_nci_i2c_id_table);
+MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table);
-struct st21nfcb_i2c_phy {
+struct st_nci_i2c_phy {
struct i2c_client *i2c_dev;
struct llt_ndlc *ndlc;
unsigned int gpio_reset;
unsigned int irq_polarity;
-
- int powered;
};
#define I2C_DUMP_SKB(info, skb) \
@@ -63,33 +61,26 @@ do { \
16, 1, (skb)->data, (skb)->len, 0); \
} while (0)
-static int st21nfcb_nci_i2c_enable(void *phy_id)
+static int st_nci_i2c_enable(void *phy_id)
{
- struct st21nfcb_i2c_phy *phy = phy_id;
+ struct st_nci_i2c_phy *phy = phy_id;
gpio_set_value(phy->gpio_reset, 0);
usleep_range(10000, 15000);
gpio_set_value(phy->gpio_reset, 1);
- phy->powered = 1;
usleep_range(80000, 85000);
+ if (phy->ndlc->powered == 0)
+ enable_irq(phy->i2c_dev->irq);
+
return 0;
}
-static void st21nfcb_nci_i2c_disable(void *phy_id)
+static void st_nci_i2c_disable(void *phy_id)
{
- struct st21nfcb_i2c_phy *phy = phy_id;
-
- phy->powered = 0;
- /* reset chip in order to flush clf */
- gpio_set_value(phy->gpio_reset, 0);
- usleep_range(10000, 15000);
- gpio_set_value(phy->gpio_reset, 1);
-}
+ struct st_nci_i2c_phy *phy = phy_id;
-static void st21nfcb_nci_remove_header(struct sk_buff *skb)
-{
- skb_pull(skb, ST21NFCB_FRAME_HEADROOM);
+ disable_irq_nosync(phy->i2c_dev->irq);
}
/*
@@ -97,13 +88,13 @@ static void st21nfcb_nci_remove_header(struct sk_buff *skb)
* It must return either zero for success, or <0 for error.
* In addition, it must not alter the skb
*/
-static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
{
int r = -1;
- struct st21nfcb_i2c_phy *phy = phy_id;
+ struct st_nci_i2c_phy *phy = phy_id;
struct i2c_client *client = phy->i2c_dev;
- I2C_DUMP_SKB("st21nfcb_nci_i2c_write", skb);
+ I2C_DUMP_SKB("st_nci_i2c_write", skb);
if (phy->ndlc->hard_fault != 0)
return phy->ndlc->hard_fault;
@@ -121,8 +112,6 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
r = 0;
}
- st21nfcb_nci_remove_header(skb);
-
return r;
}
@@ -135,40 +124,40 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
* at end of read)
* -EREMOTEIO : i2c read error (fatal)
* -EBADMSG : frame was incorrect and discarded
- * (value returned from st21nfcb_nci_i2c_repack)
+ * (value returned from st_nci_i2c_repack)
* -EIO : if no ST21NFCB_SOF_EOF is found after reaching
* the read length end sequence
*/
-static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
+static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
struct sk_buff **skb)
{
int r;
u8 len;
- u8 buf[ST21NFCB_NCI_I2C_MAX_SIZE];
+ u8 buf[ST_NCI_I2C_MAX_SIZE];
struct i2c_client *client = phy->i2c_dev;
- r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ r = i2c_master_recv(client, buf, ST_NCI_I2C_MIN_SIZE);
if (r < 0) { /* Retry, chip was in standby */
usleep_range(1000, 4000);
- r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ r = i2c_master_recv(client, buf, ST_NCI_I2C_MIN_SIZE);
}
- if (r != ST21NFCB_NCI_I2C_MIN_SIZE)
+ if (r != ST_NCI_I2C_MIN_SIZE)
return -EREMOTEIO;
len = be16_to_cpu(*(__be16 *) (buf + 2));
- if (len > ST21NFCB_NCI_I2C_MAX_SIZE) {
+ if (len > ST_NCI_I2C_MAX_SIZE) {
nfc_err(&client->dev, "invalid frame len\n");
return -EBADMSG;
}
- *skb = alloc_skb(ST21NFCB_NCI_I2C_MIN_SIZE + len, GFP_KERNEL);
+ *skb = alloc_skb(ST_NCI_I2C_MIN_SIZE + len, GFP_KERNEL);
if (*skb == NULL)
return -ENOMEM;
- skb_reserve(*skb, ST21NFCB_NCI_I2C_MIN_SIZE);
- skb_put(*skb, ST21NFCB_NCI_I2C_MIN_SIZE);
- memcpy((*skb)->data, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ skb_reserve(*skb, ST_NCI_I2C_MIN_SIZE);
+ skb_put(*skb, ST_NCI_I2C_MIN_SIZE);
+ memcpy((*skb)->data, buf, ST_NCI_I2C_MIN_SIZE);
if (!len)
return 0;
@@ -180,7 +169,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
}
skb_put(*skb, len);
- memcpy((*skb)->data + ST21NFCB_NCI_I2C_MIN_SIZE, buf, len);
+ memcpy((*skb)->data + ST_NCI_I2C_MIN_SIZE, buf, len);
I2C_DUMP_SKB("i2c frame read", *skb);
@@ -192,9 +181,9 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
*
* On ST21NFCB, IRQ goes in idle state when read starts.
*/
-static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
+static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
- struct st21nfcb_i2c_phy *phy = phy_id;
+ struct st_nci_i2c_phy *phy = phy_id;
struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -210,12 +199,12 @@ static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
- if (!phy->powered) {
- st21nfcb_nci_i2c_disable(phy);
+ if (!phy->ndlc->powered) {
+ st_nci_i2c_disable(phy);
return IRQ_HANDLED;
}
- r = st21nfcb_nci_i2c_read(phy, &skb);
+ r = st_nci_i2c_read(phy, &skb);
if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
return IRQ_HANDLED;
@@ -225,15 +214,15 @@ static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
}
static struct nfc_phy_ops i2c_phy_ops = {
- .write = st21nfcb_nci_i2c_write,
- .enable = st21nfcb_nci_i2c_enable,
- .disable = st21nfcb_nci_i2c_disable,
+ .write = st_nci_i2c_write,
+ .enable = st_nci_i2c_enable,
+ .disable = st_nci_i2c_disable,
};
#ifdef CONFIG_OF
-static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
+static int st_nci_i2c_of_request_resources(struct i2c_client *client)
{
- struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+ struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
struct device_node *pp;
int gpio;
int r;
@@ -264,16 +253,16 @@ static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
return 0;
}
#else
-static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
+static int st_nci_i2c_of_request_resources(struct i2c_client *client)
{
return -ENODEV;
}
#endif
-static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client)
+static int st_nci_i2c_request_resources(struct i2c_client *client)
{
- struct st21nfcb_nfc_platform_data *pdata;
- struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+ struct st_nci_nfc_platform_data *pdata;
+ struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
int r;
pdata = client->dev.platform_data;
@@ -296,11 +285,11 @@ static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client)
return 0;
}
-static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
+static int st_nci_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct st21nfcb_i2c_phy *phy;
- struct st21nfcb_nfc_platform_data *pdata;
+ struct st_nci_i2c_phy *phy;
+ struct st_nci_nfc_platform_data *pdata;
int r;
dev_dbg(&client->dev, "%s\n", __func__);
@@ -311,7 +300,7 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
return -ENODEV;
}
- phy = devm_kzalloc(&client->dev, sizeof(struct st21nfcb_i2c_phy),
+ phy = devm_kzalloc(&client->dev, sizeof(struct st_nci_i2c_phy),
GFP_KERNEL);
if (!phy)
return -ENOMEM;
@@ -322,13 +311,13 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
if (!pdata && client->dev.of_node) {
- r = st21nfcb_nci_i2c_of_request_resources(client);
+ r = st_nci_i2c_of_request_resources(client);
if (r) {
nfc_err(&client->dev, "No platform data\n");
return r;
}
} else if (pdata) {
- r = st21nfcb_nci_i2c_request_resources(client);
+ r = st_nci_i2c_request_resources(client);
if (r) {
nfc_err(&client->dev,
"Cannot get platform resources\n");
@@ -349,50 +338,48 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
}
r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- st21nfcb_nci_irq_thread_fn,
+ st_nci_irq_thread_fn,
phy->irq_polarity | IRQF_ONESHOT,
- ST21NFCB_NCI_DRIVER_NAME, phy);
+ ST_NCI_DRIVER_NAME, phy);
if (r < 0)
nfc_err(&client->dev, "Unable to register IRQ handler\n");
return r;
}
-static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
+static int st_nci_i2c_remove(struct i2c_client *client)
{
- struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
+ struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
dev_dbg(&client->dev, "%s\n", __func__);
ndlc_remove(phy->ndlc);
- if (phy->powered)
- st21nfcb_nci_i2c_disable(phy);
-
return 0;
}
#ifdef CONFIG_OF
-static const struct of_device_id of_st21nfcb_i2c_match[] = {
+static const struct of_device_id of_st_nci_i2c_match[] = {
{ .compatible = "st,st21nfcb-i2c", },
{ .compatible = "st,st21nfcb_i2c", },
+ { .compatible = "st,st21nfcc-i2c", },
{}
};
-MODULE_DEVICE_TABLE(of, of_st21nfcb_i2c_match);
+MODULE_DEVICE_TABLE(of, of_st_nci_i2c_match);
#endif
-static struct i2c_driver st21nfcb_nci_i2c_driver = {
+static struct i2c_driver st_nci_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = ST21NFCB_NCI_I2C_DRIVER_NAME,
- .of_match_table = of_match_ptr(of_st21nfcb_i2c_match),
+ .name = ST_NCI_I2C_DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_st_nci_i2c_match),
},
- .probe = st21nfcb_nci_i2c_probe,
- .id_table = st21nfcb_nci_i2c_id_table,
- .remove = st21nfcb_nci_i2c_remove,
+ .probe = st_nci_i2c_probe,
+ .id_table = st_nci_i2c_id_table,
+ .remove = st_nci_i2c_remove,
};
-module_i2c_driver(st21nfcb_nci_i2c_driver);
+module_i2c_driver(st_nci_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
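For reference, st_nci_i2c_read() above works in two phases: it first fetches the 4-byte header (PCB plus NCI packet header), derives the payload length from it, then issues a second i2c_master_recv() for exactly that many bytes. The length expression reads a big-endian 16-bit value at bytes 2..3 but stores it in a u8, so in practice only buf[3] -- the NCI payload-length byte -- survives. A standalone sketch of the header parse:

#include <stdio.h>
#include <stdint.h>

#define ST_NCI_I2C_MIN_SIZE 4	/* PCB (1 byte) + NCI packet header (3 bytes) */

/* Big-endian 16-bit read of header bytes 2..3, as in st_nci_i2c_read(). */
static uint16_t frame_len(const uint8_t *buf)
{
	return (uint16_t)((buf[2] << 8) | buf[3]);
}

int main(void)
{
	/* Hypothetical header: PCB, then an NCI header with payload length 2. */
	const uint8_t buf[ST_NCI_I2C_MIN_SIZE] = { 0x80, 0x20, 0x00, 0x02 };

	printf("payload length = %u\n", frame_len(buf));
	return 0;
}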
diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 6014b5859465..56c6a4cb4c96 100644
--- a/drivers/nfc/st21nfcb/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -1,7 +1,7 @@
/*
* Low Level Transport (NDLC) Driver for STMicroelectronics NFC Chip
*
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,7 @@
#include <net/nfc/nci_core.h>
#include "ndlc.h"
-#include "st21nfcb.h"
+#include "st-nci.h"
#define NDLC_TIMER_T1 100
#define NDLC_TIMER_T1_WAIT 400
@@ -59,13 +59,25 @@ int ndlc_open(struct llt_ndlc *ndlc)
{
/* toggle reset pin */
ndlc->ops->enable(ndlc->phy_id);
+ ndlc->powered = 1;
return 0;
}
EXPORT_SYMBOL(ndlc_open);
void ndlc_close(struct llt_ndlc *ndlc)
{
+ struct nci_mode_set_cmd cmd;
+
+ cmd.cmd_type = ST_NCI_SET_NFC_MODE;
+ cmd.mode = 0;
+
/* toggle reset pin */
+ ndlc->ops->enable(ndlc->phy_id);
+
+ nci_prop_cmd(ndlc->ndev, ST_NCI_CORE_PROP,
+ sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd);
+
+ ndlc->powered = 0;
ndlc->ops->disable(ndlc->phy_id);
}
EXPORT_SYMBOL(ndlc_close);
@@ -262,6 +274,7 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
ndlc->ops = phy_ops;
ndlc->phy_id = phy_id;
ndlc->dev = dev;
+ ndlc->powered = 0;
*ndlc_id = ndlc;
@@ -280,12 +293,14 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
INIT_WORK(&ndlc->sm_work, llt_ndlc_sm_work);
- return st21nfcb_nci_probe(ndlc, phy_headroom, phy_tailroom);
+ return st_nci_probe(ndlc, phy_headroom, phy_tailroom);
}
EXPORT_SYMBOL(ndlc_probe);
void ndlc_remove(struct llt_ndlc *ndlc)
{
+ st_nci_remove(ndlc->ndev);
+
/* cancel timers */
del_timer_sync(&ndlc->t1_timer);
del_timer_sync(&ndlc->t2_timer);
@@ -294,7 +309,5 @@ void ndlc_remove(struct llt_ndlc *ndlc)
skb_queue_purge(&ndlc->rcv_q);
skb_queue_purge(&ndlc->send_q);
-
- st21nfcb_nci_remove(ndlc->ndev);
}
EXPORT_SYMBOL(ndlc_remove);
diff --git a/drivers/nfc/st21nfcb/ndlc.h b/drivers/nfc/st-nci/ndlc.h
index b28140e0cd78..6361005ef003 100644
--- a/drivers/nfc/st21nfcb/ndlc.h
+++ b/drivers/nfc/st-nci/ndlc.h
@@ -1,7 +1,7 @@
/*
* NCI based Driver for STMicroelectronics NFC Chip
*
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,11 @@ struct llt_ndlc {
struct device *dev;
/*
- * < 0 if hardware error occured
+ * < 0 if hardware error occurred
* and prevents normal operation.
*/
int hard_fault;
+ int powered;
};
int ndlc_open(struct llt_ndlc *ndlc);
diff --git a/drivers/nfc/st21nfcb/st21nfcb.h b/drivers/nfc/st-nci/st-nci.h
index 5ef8a58c9839..850a2395deb7 100644
--- a/drivers/nfc/st21nfcb/st21nfcb.h
+++ b/drivers/nfc/st-nci/st-nci.h
@@ -16,23 +16,35 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __LOCAL_ST21NFCB_H_
-#define __LOCAL_ST21NFCB_H_
+#ifndef __LOCAL_ST_NCI_H_
+#define __LOCAL_ST_NCI_H_
-#include "st21nfcb_se.h"
+#include "st-nci_se.h"
#include "ndlc.h"
/* Define private flags: */
-#define ST21NFCB_NCI_RUNNING 1
+#define ST_NCI_RUNNING 1
-struct st21nfcb_nci_info {
+#define ST_NCI_CORE_PROP 0x01
+#define ST_NCI_SET_NFC_MODE 0x02
+
+struct nci_mode_set_cmd {
+ u8 cmd_type;
+ u8 mode;
+} __packed;
+
+struct nci_mode_set_rsp {
+ u8 status;
+} __packed;
+
+struct st_nci_info {
struct llt_ndlc *ndlc;
unsigned long flags;
- struct st21nfcb_se_info se_info;
+ struct st_nci_se_info se_info;
};
-void st21nfcb_nci_remove(struct nci_dev *ndev);
-int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
+void st_nci_remove(struct nci_dev *ndev);
+int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
int phy_tailroom);
-#endif /* __LOCAL_ST21NFCB_H_ */
+#endif /* __LOCAL_ST_NCI_H_ */
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.c b/drivers/nfc/st-nci/st-nci_se.c
index 24862a525fb5..97addfa96c6f 100644
--- a/drivers/nfc/st21nfcb/st21nfcb_se.c
+++ b/drivers/nfc/st-nci/st-nci_se.c
@@ -1,7 +1,7 @@
/*
- * NCI based Driver for STMicroelectronics NFC Chip
+ * Secure Element driver for STMicroelectronics NFC NCI chip
*
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -22,10 +22,10 @@
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
-#include "st21nfcb.h"
-#include "st21nfcb_se.h"
+#include "st-nci.h"
+#include "st-nci_se.h"
-struct st21nfcb_pipe_info {
+struct st_nci_pipe_info {
u8 pipe_state;
u8 src_host_id;
u8 src_gate_id;
@@ -34,166 +34,166 @@ struct st21nfcb_pipe_info {
} __packed;
/* Hosts */
-#define ST21NFCB_HOST_CONTROLLER_ID 0x00
-#define ST21NFCB_TERMINAL_HOST_ID 0x01
-#define ST21NFCB_UICC_HOST_ID 0x02
-#define ST21NFCB_ESE_HOST_ID 0xc0
+#define ST_NCI_HOST_CONTROLLER_ID 0x00
+#define ST_NCI_TERMINAL_HOST_ID 0x01
+#define ST_NCI_UICC_HOST_ID 0x02
+#define ST_NCI_ESE_HOST_ID 0xc0
/* Gates */
-#define ST21NFCB_DEVICE_MGNT_GATE 0x01
-#define ST21NFCB_APDU_READER_GATE 0xf0
-#define ST21NFCB_CONNECTIVITY_GATE 0x41
+#define ST_NCI_DEVICE_MGNT_GATE 0x01
+#define ST_NCI_APDU_READER_GATE 0xf0
+#define ST_NCI_CONNECTIVITY_GATE 0x41
/* Pipes */
-#define ST21NFCB_DEVICE_MGNT_PIPE 0x02
+#define ST_NCI_DEVICE_MGNT_PIPE 0x02
/* Connectivity pipe only */
-#define ST21NFCB_SE_COUNT_PIPE_UICC 0x01
+#define ST_NCI_SE_COUNT_PIPE_UICC 0x01
/* Connectivity + APDU Reader pipe */
-#define ST21NFCB_SE_COUNT_PIPE_EMBEDDED 0x02
+#define ST_NCI_SE_COUNT_PIPE_EMBEDDED 0x02
-#define ST21NFCB_SE_TO_HOT_PLUG 1000 /* msecs */
-#define ST21NFCB_SE_TO_PIPES 2000
+#define ST_NCI_SE_TO_HOT_PLUG 1000 /* msecs */
+#define ST_NCI_SE_TO_PIPES 2000
-#define ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80)
+#define ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80)
#define NCI_HCI_APDU_PARAM_ATR 0x01
#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01
#define NCI_HCI_ADMIN_PARAM_WHITELIST 0x03
#define NCI_HCI_ADMIN_PARAM_HOST_LIST 0x04
-#define ST21NFCB_EVT_SE_HARD_RESET 0x20
-#define ST21NFCB_EVT_TRANSMIT_DATA 0x10
-#define ST21NFCB_EVT_WTX_REQUEST 0x11
-#define ST21NFCB_EVT_SE_SOFT_RESET 0x11
-#define ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER 0x21
-#define ST21NFCB_EVT_HOT_PLUG 0x03
+#define ST_NCI_EVT_SE_HARD_RESET 0x20
+#define ST_NCI_EVT_TRANSMIT_DATA 0x10
+#define ST_NCI_EVT_WTX_REQUEST 0x11
+#define ST_NCI_EVT_SE_SOFT_RESET 0x11
+#define ST_NCI_EVT_SE_END_OF_APDU_TRANSFER 0x21
+#define ST_NCI_EVT_HOT_PLUG 0x03
-#define ST21NFCB_SE_MODE_OFF 0x00
-#define ST21NFCB_SE_MODE_ON 0x01
+#define ST_NCI_SE_MODE_OFF 0x00
+#define ST_NCI_SE_MODE_ON 0x01
-#define ST21NFCB_EVT_CONNECTIVITY 0x10
-#define ST21NFCB_EVT_TRANSACTION 0x12
+#define ST_NCI_EVT_CONNECTIVITY 0x10
+#define ST_NCI_EVT_TRANSACTION 0x12
-#define ST21NFCB_DM_GETINFO 0x13
-#define ST21NFCB_DM_GETINFO_PIPE_LIST 0x02
-#define ST21NFCB_DM_GETINFO_PIPE_INFO 0x01
-#define ST21NFCB_DM_PIPE_CREATED 0x02
-#define ST21NFCB_DM_PIPE_OPEN 0x04
-#define ST21NFCB_DM_RF_ACTIVE 0x80
-#define ST21NFCB_DM_DISCONNECT 0x30
+#define ST_NCI_DM_GETINFO 0x13
+#define ST_NCI_DM_GETINFO_PIPE_LIST 0x02
+#define ST_NCI_DM_GETINFO_PIPE_INFO 0x01
+#define ST_NCI_DM_PIPE_CREATED 0x02
+#define ST_NCI_DM_PIPE_OPEN 0x04
+#define ST_NCI_DM_RF_ACTIVE 0x80
+#define ST_NCI_DM_DISCONNECT 0x30
-#define ST21NFCB_DM_IS_PIPE_OPEN(p) \
- ((p & 0x0f) == (ST21NFCB_DM_PIPE_CREATED | ST21NFCB_DM_PIPE_OPEN))
+#define ST_NCI_DM_IS_PIPE_OPEN(p) \
+ ((p & 0x0f) == (ST_NCI_DM_PIPE_CREATED | ST_NCI_DM_PIPE_OPEN))
-#define ST21NFCB_ATR_DEFAULT_BWI 0x04
+#define ST_NCI_ATR_DEFAULT_BWI 0x04
/*
 * WT = 2^BWI/10 [s]; convert into msecs and add a safety
 * margin by doubling this timeout
*/
-#define ST21NFCB_BWI_TO_TIMEOUT(x) ((1 << x) * 200)
-#define ST21NFCB_ATR_GET_Y_FROM_TD(x) (x >> 4)
+#define ST_NCI_BWI_TO_TIMEOUT(x) ((1 << x) * 200)
+#define ST_NCI_ATR_GET_Y_FROM_TD(x) (x >> 4)
/* If TA is present bit 0 is set */
-#define ST21NFCB_ATR_TA_PRESENT(x) (x & 0x01)
+#define ST_NCI_ATR_TA_PRESENT(x) (x & 0x01)
/* If TB is present bit 1 is set */
-#define ST21NFCB_ATR_TB_PRESENT(x) (x & 0x02)
+#define ST_NCI_ATR_TB_PRESENT(x) (x & 0x02)
-#define ST21NFCB_NUM_DEVICES 256
+#define ST_NCI_NUM_DEVICES 256
-static DECLARE_BITMAP(dev_mask, ST21NFCB_NUM_DEVICES);
+static DECLARE_BITMAP(dev_mask, ST_NCI_NUM_DEVICES);
-/* Here are the mandatory pipe for st21nfcb */
-static struct nci_hci_gate st21nfcb_gates[] = {
+/* Here are the mandatory pipe for st_nci */
+static struct nci_hci_gate st_nci_gates[] = {
{NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE,
- ST21NFCB_HOST_CONTROLLER_ID},
+ ST_NCI_HOST_CONTROLLER_ID},
{NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE,
- ST21NFCB_HOST_CONTROLLER_ID},
- {ST21NFCB_DEVICE_MGNT_GATE, ST21NFCB_DEVICE_MGNT_PIPE,
- ST21NFCB_HOST_CONTROLLER_ID},
+ ST_NCI_HOST_CONTROLLER_ID},
+ {ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DEVICE_MGNT_PIPE,
+ ST_NCI_HOST_CONTROLLER_ID},
/* Secure element pipes are created by secure element host */
- {ST21NFCB_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
- ST21NFCB_HOST_CONTROLLER_ID},
- {ST21NFCB_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
- ST21NFCB_HOST_CONTROLLER_ID},
+ {ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+ ST_NCI_HOST_CONTROLLER_ID},
+ {ST_NCI_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+ ST_NCI_HOST_CONTROLLER_ID},
};
-static u8 st21nfcb_se_get_bwi(struct nci_dev *ndev)
+static u8 st_nci_se_get_bwi(struct nci_dev *ndev)
{
int i;
u8 td;
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
/* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
- for (i = 1; i < ST21NFCB_ESE_MAX_LENGTH; i++) {
- td = ST21NFCB_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
- if (ST21NFCB_ATR_TA_PRESENT(td))
+ for (i = 1; i < ST_NCI_ESE_MAX_LENGTH; i++) {
+ td = ST_NCI_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
+ if (ST_NCI_ATR_TA_PRESENT(td))
i++;
- if (ST21NFCB_ATR_TB_PRESENT(td)) {
+ if (ST_NCI_ATR_TB_PRESENT(td)) {
i++;
return info->se_info.atr[i] >> 4;
}
}
- return ST21NFCB_ATR_DEFAULT_BWI;
+ return ST_NCI_ATR_DEFAULT_BWI;
}
-static void st21nfcb_se_get_atr(struct nci_dev *ndev)
+static void st_nci_se_get_atr(struct nci_dev *ndev)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
int r;
struct sk_buff *skb;
- r = nci_hci_get_param(ndev, ST21NFCB_APDU_READER_GATE,
+ r = nci_hci_get_param(ndev, ST_NCI_APDU_READER_GATE,
NCI_HCI_APDU_PARAM_ATR, &skb);
if (r < 0)
return;
- if (skb->len <= ST21NFCB_ESE_MAX_LENGTH) {
+ if (skb->len <= ST_NCI_ESE_MAX_LENGTH) {
memcpy(info->se_info.atr, skb->data, skb->len);
info->se_info.wt_timeout =
- ST21NFCB_BWI_TO_TIMEOUT(st21nfcb_se_get_bwi(ndev));
+ ST_NCI_BWI_TO_TIMEOUT(st_nci_se_get_bwi(ndev));
}
kfree_skb(skb);
}
-int st21nfcb_hci_load_session(struct nci_dev *ndev)
+int st_nci_hci_load_session(struct nci_dev *ndev)
{
int i, j, r;
struct sk_buff *skb_pipe_list, *skb_pipe_info;
- struct st21nfcb_pipe_info *dm_pipe_info;
- u8 pipe_list[] = { ST21NFCB_DM_GETINFO_PIPE_LIST,
- ST21NFCB_TERMINAL_HOST_ID};
- u8 pipe_info[] = { ST21NFCB_DM_GETINFO_PIPE_INFO,
- ST21NFCB_TERMINAL_HOST_ID, 0};
+ struct st_nci_pipe_info *dm_pipe_info;
+ u8 pipe_list[] = { ST_NCI_DM_GETINFO_PIPE_LIST,
+ ST_NCI_TERMINAL_HOST_ID};
+ u8 pipe_info[] = { ST_NCI_DM_GETINFO_PIPE_INFO,
+ ST_NCI_TERMINAL_HOST_ID, 0};
- /* On ST21NFCB device pipes number are dynamics
+ /* On ST_NCI devices, pipe numbers are dynamic
* If pipes are already created, hci_dev_up will fail.
* Doing a clear all pipe is a bad idea because:
* - It does useless EEPROM cycling
* - It might cause issue for secure elements support
* (such as removing connectivity or APDU reader pipe)
- * A better approach on ST21NFCB is to:
+ * A better approach on ST_NCI is to:
* - get a pipe list for each host.
- * (eg: ST21NFCB_HOST_CONTROLLER_ID for now).
+ * (eg: ST_NCI_HOST_CONTROLLER_ID for now).
* (TODO Later on UICC HOST and eSE HOST)
* - get pipe information
- * - match retrieved pipe list in st21nfcb_gates
- * ST21NFCB_DEVICE_MGNT_GATE is a proprietary gate
- * with ST21NFCB_DEVICE_MGNT_PIPE.
+ * - match retrieved pipe list in st_nci_gates
+ * ST_NCI_DEVICE_MGNT_GATE is a proprietary gate
+ * with ST_NCI_DEVICE_MGNT_PIPE.
* Pipe can be closed and need to be open.
*/
- r = nci_hci_connect_gate(ndev, ST21NFCB_HOST_CONTROLLER_ID,
- ST21NFCB_DEVICE_MGNT_GATE,
- ST21NFCB_DEVICE_MGNT_PIPE);
+ r = nci_hci_connect_gate(ndev, ST_NCI_HOST_CONTROLLER_ID,
+ ST_NCI_DEVICE_MGNT_GATE,
+ ST_NCI_DEVICE_MGNT_PIPE);
if (r < 0)
goto free_info;
/* Get pipe list */
- r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
- ST21NFCB_DM_GETINFO, pipe_list, sizeof(pipe_list),
+ r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+ ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
&skb_pipe_list);
if (r < 0)
goto free_info;
@@ -201,8 +201,8 @@ int st21nfcb_hci_load_session(struct nci_dev *ndev)
/* Complete the existing gate_pipe table */
for (i = 0; i < skb_pipe_list->len; i++) {
pipe_info[2] = skb_pipe_list->data[i];
- r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
- ST21NFCB_DM_GETINFO, pipe_info,
+ r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+ ST_NCI_DM_GETINFO, pipe_info,
sizeof(pipe_info), &skb_pipe_info);
if (r)
@@ -217,81 +217,81 @@ int st21nfcb_hci_load_session(struct nci_dev *ndev)
* - destination hid (1byte)
* - destination gid (1byte)
*/
- dm_pipe_info = (struct st21nfcb_pipe_info *)skb_pipe_info->data;
- if (dm_pipe_info->dst_gate_id == ST21NFCB_APDU_READER_GATE &&
- dm_pipe_info->src_host_id != ST21NFCB_ESE_HOST_ID) {
+ dm_pipe_info = (struct st_nci_pipe_info *)skb_pipe_info->data;
+ if (dm_pipe_info->dst_gate_id == ST_NCI_APDU_READER_GATE &&
+ dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
pr_err("Unexpected apdu_reader pipe on host %x\n",
dm_pipe_info->src_host_id);
continue;
}
- for (j = 0; (j < ARRAY_SIZE(st21nfcb_gates)) &&
- (st21nfcb_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
+ for (j = 0; (j < ARRAY_SIZE(st_nci_gates)) &&
+ (st_nci_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
;
- if (j < ARRAY_SIZE(st21nfcb_gates) &&
- st21nfcb_gates[j].gate == dm_pipe_info->dst_gate_id &&
- ST21NFCB_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
- st21nfcb_gates[j].pipe = pipe_info[2];
+ if (j < ARRAY_SIZE(st_nci_gates) &&
+ st_nci_gates[j].gate == dm_pipe_info->dst_gate_id &&
+ ST_NCI_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
+ st_nci_gates[j].pipe = pipe_info[2];
- ndev->hci_dev->gate2pipe[st21nfcb_gates[j].gate] =
- st21nfcb_gates[j].pipe;
- ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].gate =
- st21nfcb_gates[j].gate;
- ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].host =
+ ndev->hci_dev->gate2pipe[st_nci_gates[j].gate] =
+ st_nci_gates[j].pipe;
+ ndev->hci_dev->pipes[st_nci_gates[j].pipe].gate =
+ st_nci_gates[j].gate;
+ ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
dm_pipe_info->src_host_id;
}
}
- memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
- sizeof(st21nfcb_gates));
+ memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
+ sizeof(st_nci_gates));
free_info:
kfree_skb(skb_pipe_info);
kfree_skb(skb_pipe_list);
return r;
}
-EXPORT_SYMBOL_GPL(st21nfcb_hci_load_session);
+EXPORT_SYMBOL_GPL(st_nci_hci_load_session);
-static void st21nfcb_hci_admin_event_received(struct nci_dev *ndev,
+static void st_nci_hci_admin_event_received(struct nci_dev *ndev,
u8 event, struct sk_buff *skb)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
switch (event) {
- case ST21NFCB_EVT_HOT_PLUG:
+ case ST_NCI_EVT_HOT_PLUG:
if (info->se_info.se_active) {
- if (!ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
+ if (!ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
del_timer_sync(&info->se_info.se_active_timer);
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
} else {
mod_timer(&info->se_info.se_active_timer,
jiffies +
- msecs_to_jiffies(ST21NFCB_SE_TO_PIPES));
+ msecs_to_jiffies(ST_NCI_SE_TO_PIPES));
}
}
break;
}
}
-static int st21nfcb_hci_apdu_reader_event_received(struct nci_dev *ndev,
+static int st_nci_hci_apdu_reader_event_received(struct nci_dev *ndev,
u8 event,
struct sk_buff *skb)
{
int r = 0;
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
pr_debug("apdu reader gate event: %x\n", event);
switch (event) {
- case ST21NFCB_EVT_TRANSMIT_DATA:
+ case ST_NCI_EVT_TRANSMIT_DATA:
del_timer_sync(&info->se_info.bwi_timer);
info->se_info.bwi_active = false;
info->se_info.cb(info->se_info.cb_context,
skb->data, skb->len, 0);
break;
- case ST21NFCB_EVT_WTX_REQUEST:
+ case ST_NCI_EVT_WTX_REQUEST:
mod_timer(&info->se_info.bwi_timer, jiffies +
msecs_to_jiffies(info->se_info.wt_timeout));
break;
@@ -306,7 +306,7 @@ static int st21nfcb_hci_apdu_reader_event_received(struct nci_dev *ndev,
* <= 0: driver handled the event, skb consumed
* 1: driver does not handle the event, please do standard processing
*/
-static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
+static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
u8 host, u8 event,
struct sk_buff *skb)
{
@@ -317,10 +317,10 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
pr_debug("connectivity gate event: %x\n", event);
switch (event) {
- case ST21NFCB_EVT_CONNECTIVITY:
+ case ST_NCI_EVT_CONNECTIVITY:
break;
- case ST21NFCB_EVT_TRANSACTION:
+ case ST_NCI_EVT_TRANSACTION:
/* According to specification etsi 102 622
* 11.2.2.4 EVT_TRANSACTION Table 52
* Description Tag Length
@@ -355,7 +355,7 @@ static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
return r;
}
-void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
+void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
u8 event, struct sk_buff *skb)
{
u8 gate = ndev->hci_dev->pipes[pipe].gate;
@@ -363,32 +363,32 @@ void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
switch (gate) {
case NCI_HCI_ADMIN_GATE:
- st21nfcb_hci_admin_event_received(ndev, event, skb);
+ st_nci_hci_admin_event_received(ndev, event, skb);
break;
- case ST21NFCB_APDU_READER_GATE:
- st21nfcb_hci_apdu_reader_event_received(ndev, event, skb);
+ case ST_NCI_APDU_READER_GATE:
+ st_nci_hci_apdu_reader_event_received(ndev, event, skb);
break;
- case ST21NFCB_CONNECTIVITY_GATE:
- st21nfcb_hci_connectivity_event_received(ndev, host, event,
+ case ST_NCI_CONNECTIVITY_GATE:
+ st_nci_hci_connectivity_event_received(ndev, host, event,
skb);
break;
}
}
-EXPORT_SYMBOL_GPL(st21nfcb_hci_event_received);
+EXPORT_SYMBOL_GPL(st_nci_hci_event_received);
-void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
struct sk_buff *skb)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
u8 gate = ndev->hci_dev->pipes[pipe].gate;
pr_debug("cmd: %x\n", cmd);
switch (cmd) {
case NCI_HCI_ANY_OPEN_PIPE:
- if (gate != ST21NFCB_APDU_READER_GATE &&
- ndev->hci_dev->pipes[pipe].host != ST21NFCB_UICC_HOST_ID)
+ if (gate != ST_NCI_APDU_READER_GATE &&
+ ndev->hci_dev->pipes[pipe].host != ST_NCI_UICC_HOST_ID)
ndev->hci_dev->count_pipes++;
if (ndev->hci_dev->count_pipes ==
@@ -401,28 +401,28 @@ void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
break;
}
}
-EXPORT_SYMBOL_GPL(st21nfcb_hci_cmd_received);
+EXPORT_SYMBOL_GPL(st_nci_hci_cmd_received);
/*
- * Remarks: On some early st21nfcb firmware, nci_nfcee_mode_set(0)
+ * Remarks: On some early st_nci firmware, nci_nfcee_mode_set(0)
* is rejected
*/
-static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx,
+static int st_nci_control_se(struct nci_dev *ndev, u8 se_idx,
u8 state)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
int r;
struct sk_buff *sk_host_list;
u8 host_id;
switch (se_idx) {
- case ST21NFCB_UICC_HOST_ID:
+ case ST_NCI_UICC_HOST_ID:
ndev->hci_dev->count_pipes = 0;
- ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_UICC;
+ ndev->hci_dev->expected_pipes = ST_NCI_SE_COUNT_PIPE_UICC;
break;
- case ST21NFCB_ESE_HOST_ID:
+ case ST_NCI_ESE_HOST_ID:
ndev->hci_dev->count_pipes = 0;
- ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_EMBEDDED;
+ ndev->hci_dev->expected_pipes = ST_NCI_SE_COUNT_PIPE_EMBEDDED;
break;
default:
return -EINVAL;
@@ -438,7 +438,7 @@ static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx,
return r;
mod_timer(&info->se_info.se_active_timer, jiffies +
- msecs_to_jiffies(ST21NFCB_SE_TO_HOT_PLUG));
+ msecs_to_jiffies(ST_NCI_SE_TO_HOT_PLUG));
info->se_info.se_active = true;
/* Ignore return value and check in any case the host_list */
@@ -458,49 +458,49 @@ static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx,
host_id = sk_host_list->data[sk_host_list->len - 1];
kfree_skb(sk_host_list);
- if (state == ST21NFCB_SE_MODE_ON && host_id == se_idx)
+ if (state == ST_NCI_SE_MODE_ON && host_id == se_idx)
return se_idx;
- else if (state == ST21NFCB_SE_MODE_OFF && host_id != se_idx)
+ else if (state == ST_NCI_SE_MODE_OFF && host_id != se_idx)
return se_idx;
return -1;
}
-int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
+int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
{
int r;
- pr_debug("st21nfcb_nci_disable_se\n");
+ pr_debug("st_nci_disable_se\n");
if (se_idx == NFC_SE_EMBEDDED) {
- r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
- ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
+ r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+ ST_NCI_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
if (r < 0)
return r;
}
return 0;
}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_disable_se);
+EXPORT_SYMBOL_GPL(st_nci_disable_se);
-int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
+int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
{
int r;
- pr_debug("st21nfcb_nci_enable_se\n");
+ pr_debug("st_nci_enable_se\n");
- if (se_idx == ST21NFCB_HCI_HOST_ID_ESE) {
- r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
- ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
+ if (se_idx == ST_NCI_HCI_HOST_ID_ESE) {
+ r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+ ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
if (r < 0)
return r;
}
return 0;
}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_enable_se);
+EXPORT_SYMBOL_GPL(st_nci_enable_se);
-static int st21nfcb_hci_network_init(struct nci_dev *ndev)
+static int st_nci_hci_network_init(struct nci_dev *ndev)
{
struct core_conn_create_dest_spec_params *dest_params;
struct dest_spec_params spec_params;
@@ -519,7 +519,8 @@ static int st21nfcb_hci_network_init(struct nci_dev *ndev)
dest_params->length = sizeof(struct dest_spec_params);
spec_params.id = ndev->hci_dev->nfcee_id;
spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS;
- memcpy(dest_params->value, &spec_params, sizeof(struct dest_spec_params));
+ memcpy(dest_params->value, &spec_params,
+ sizeof(struct dest_spec_params));
r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1,
sizeof(struct core_conn_create_dest_spec_params) +
sizeof(struct dest_spec_params),
@@ -531,15 +532,15 @@ static int st21nfcb_hci_network_init(struct nci_dev *ndev)
if (!conn_info)
goto free_dest_params;
- memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
- sizeof(st21nfcb_gates));
+ memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
+ sizeof(st_nci_gates));
/*
* Session id must include the driver name + i2c bus addr
* persistent info to discriminate 2 identical chips
*/
- dev_num = find_first_zero_bit(dev_mask, ST21NFCB_NUM_DEVICES);
- if (dev_num >= ST21NFCB_NUM_DEVICES) {
+ dev_num = find_first_zero_bit(dev_mask, ST_NCI_NUM_DEVICES);
+ if (dev_num >= ST_NCI_NUM_DEVICES) {
r = -ENODEV;
goto free_dest_params;
}
@@ -564,72 +565,72 @@ exit:
return r;
}
-int st21nfcb_nci_discover_se(struct nci_dev *ndev)
+int st_nci_discover_se(struct nci_dev *ndev)
{
u8 param[2];
int r;
int se_count = 0;
- pr_debug("st21nfcb_nci_discover_se\n");
+ pr_debug("st_nci_discover_se\n");
- r = st21nfcb_hci_network_init(ndev);
+ r = st_nci_hci_network_init(ndev);
if (r != 0)
return r;
- param[0] = ST21NFCB_UICC_HOST_ID;
- param[1] = ST21NFCB_HCI_HOST_ID_ESE;
+ param[0] = ST_NCI_UICC_HOST_ID;
+ param[1] = ST_NCI_HCI_HOST_ID_ESE;
r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
NCI_HCI_ADMIN_PARAM_WHITELIST,
param, sizeof(param));
if (r != NCI_HCI_ANY_OK)
return r;
- r = st21nfcb_nci_control_se(ndev, ST21NFCB_UICC_HOST_ID,
- ST21NFCB_SE_MODE_ON);
- if (r == ST21NFCB_UICC_HOST_ID) {
- nfc_add_se(ndev->nfc_dev, ST21NFCB_UICC_HOST_ID, NFC_SE_UICC);
+ r = st_nci_control_se(ndev, ST_NCI_UICC_HOST_ID,
+ ST_NCI_SE_MODE_ON);
+ if (r == ST_NCI_UICC_HOST_ID) {
+ nfc_add_se(ndev->nfc_dev, ST_NCI_UICC_HOST_ID, NFC_SE_UICC);
se_count++;
}
/* Try to enable eSE in order to check availability */
- r = st21nfcb_nci_control_se(ndev, ST21NFCB_HCI_HOST_ID_ESE,
- ST21NFCB_SE_MODE_ON);
- if (r == ST21NFCB_HCI_HOST_ID_ESE) {
- nfc_add_se(ndev->nfc_dev, ST21NFCB_HCI_HOST_ID_ESE,
+ r = st_nci_control_se(ndev, ST_NCI_HCI_HOST_ID_ESE,
+ ST_NCI_SE_MODE_ON);
+ if (r == ST_NCI_HCI_HOST_ID_ESE) {
+ nfc_add_se(ndev->nfc_dev, ST_NCI_HCI_HOST_ID_ESE,
NFC_SE_EMBEDDED);
se_count++;
- st21nfcb_se_get_atr(ndev);
+ st_nci_se_get_atr(ndev);
}
return !se_count;
}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_discover_se);
+EXPORT_SYMBOL_GPL(st_nci_discover_se);
-int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
u8 *apdu, size_t apdu_length,
se_io_cb_t cb, void *cb_context)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
pr_debug("\n");
switch (se_idx) {
- case ST21NFCB_HCI_HOST_ID_ESE:
+ case ST_NCI_HCI_HOST_ID_ESE:
info->se_info.cb = cb;
info->se_info.cb_context = cb_context;
mod_timer(&info->se_info.bwi_timer, jiffies +
msecs_to_jiffies(info->se_info.wt_timeout));
info->se_info.bwi_active = true;
- return nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
- ST21NFCB_EVT_TRANSMIT_DATA, apdu,
+ return nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+ ST_NCI_EVT_TRANSMIT_DATA, apdu,
apdu_length);
default:
return -ENODEV;
}
}
-EXPORT_SYMBOL(st21nfcb_nci_se_io);
+EXPORT_SYMBOL(st_nci_se_io);
-static void st21nfcb_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(unsigned long data)
{
/*
* No answer from the secure element
@@ -642,7 +643,7 @@ static void st21nfcb_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
+ struct st_nci_info *info = (struct st_nci_info *) data;
pr_debug("\n");
@@ -650,19 +651,19 @@ static void st21nfcb_se_wt_timeout(unsigned long data)
if (!info->se_info.xch_error) {
info->se_info.xch_error = true;
- nci_hci_send_event(info->ndlc->ndev, ST21NFCB_APDU_READER_GATE,
- ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
+ nci_hci_send_event(info->ndlc->ndev, ST_NCI_APDU_READER_GATE,
+ ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
} else {
info->se_info.xch_error = false;
- nci_hci_send_event(info->ndlc->ndev, ST21NFCB_DEVICE_MGNT_GATE,
- ST21NFCB_EVT_SE_HARD_RESET, &param, 1);
+ nci_hci_send_event(info->ndlc->ndev, ST_NCI_DEVICE_MGNT_GATE,
+ ST_NCI_EVT_SE_HARD_RESET, &param, 1);
}
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st21nfcb_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(unsigned long data)
{
- struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
+ struct st_nci_info *info = (struct st_nci_info *) data;
pr_debug("\n");
@@ -671,35 +672,35 @@ static void st21nfcb_se_activation_timeout(unsigned long data)
complete(&info->se_info.req_completion);
}
-int st21nfcb_se_init(struct nci_dev *ndev)
+int st_nci_se_init(struct nci_dev *ndev)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
init_completion(&info->se_info.req_completion);
/* initialize timers */
init_timer(&info->se_info.bwi_timer);
info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st21nfcb_se_wt_timeout;
+ info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
info->se_info.bwi_active = false;
init_timer(&info->se_info.se_active_timer);
info->se_info.se_active_timer.data = (unsigned long)info;
info->se_info.se_active_timer.function =
- st21nfcb_se_activation_timeout;
+ st_nci_se_activation_timeout;
info->se_info.se_active = false;
info->se_info.xch_error = false;
info->se_info.wt_timeout =
- ST21NFCB_BWI_TO_TIMEOUT(ST21NFCB_ATR_DEFAULT_BWI);
+ ST_NCI_BWI_TO_TIMEOUT(ST_NCI_ATR_DEFAULT_BWI);
return 0;
}
-EXPORT_SYMBOL(st21nfcb_se_init);
+EXPORT_SYMBOL(st_nci_se_init);
-void st21nfcb_se_deinit(struct nci_dev *ndev)
+void st_nci_se_deinit(struct nci_dev *ndev)
{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+ struct st_nci_info *info = nci_get_drvdata(ndev);
if (info->se_info.bwi_active)
del_timer_sync(&info->se_info.bwi_timer);
@@ -709,5 +710,5 @@ void st21nfcb_se_deinit(struct nci_dev *ndev)
info->se_info.se_active = false;
info->se_info.bwi_active = false;
}
-EXPORT_SYMBOL(st21nfcb_se_deinit);
+EXPORT_SYMBOL(st_nci_se_deinit);
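
The pipe-discovery loop earlier in this file parses each ST_NCI_DM_GETINFO
response through struct st_nci_pipe_info, whose definition is not visible in
this hunk. A hypothetical reconstruction from the byte-layout comment and the
three fields the code actually dereferences -- field order and padding are
assumptions, only the named fields are certain:

    #include <linux/types.h>

    /* Hypothetical sketch; the real definition lives elsewhere in
     * drivers/nfc/st-nci/st-nci_se.c. */
    struct st_nci_pipe_info {
            u8 pipe_state;          /* tested with ST_NCI_DM_IS_PIPE_OPEN() */
            u8 src_host_id;         /* must be ST_NCI_ESE_HOST_ID for APDU  */
            u8 src_gate_id;
            u8 dst_host_id;
            u8 dst_gate_id;         /* matched against st_nci_gates[].gate  */
    } __packed;
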
diff --git a/drivers/nfc/st-nci/st-nci_se.h b/drivers/nfc/st-nci/st-nci_se.h
new file mode 100644
index 000000000000..ea66e879d67f
--- /dev/null
+++ b/drivers/nfc/st-nci/st-nci_se.h
@@ -0,0 +1,61 @@
+/*
+ * Secure Element Driver for STMicroelectronics NFC NCI Chip
+ *
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LOCAL_ST_NCI_SE_H_
+#define __LOCAL_ST_NCI_SE_H_
+
+/*
+ * Per ISO/IEC 7816-3, chapter 8.1: the initial character TS is followed
+ * by a sequence of at most 32 characters.
+ */
+#define ST_NCI_ESE_MAX_LENGTH 33
+#define ST_NCI_HCI_HOST_ID_ESE 0xc0
+
+struct st_nci_se_info {
+ u8 atr[ST_NCI_ESE_MAX_LENGTH];
+ struct completion req_completion;
+
+ struct timer_list bwi_timer;
+ int wt_timeout; /* in msecs */
+ bool bwi_active;
+
+ struct timer_list se_active_timer;
+ bool se_active;
+
+ bool xch_error;
+
+ se_io_cb_t cb;
+ void *cb_context;
+};
+
+int st_nci_se_init(struct nci_dev *ndev);
+void st_nci_se_deinit(struct nci_dev *ndev);
+
+int st_nci_discover_se(struct nci_dev *ndev);
+int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
+int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
+int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
+int st_nci_hci_load_session(struct nci_dev *ndev);
+void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
+ u8 event, struct sk_buff *skb);
+void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+
+
+#endif /* __LOCAL_ST_NCI_SE_H_ */
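
The header above is the entire SE-facing API of this driver. As a usage
illustration only, a hedged sketch of how a caller might drive an APDU
exchange through st_nci_se_io() -- the callback and wrapper names here are
invented; se_io_cb_t is the NFC core's standard completion callback type:

    /* Illustrative caller of the API declared above (names hypothetical). */
    static void example_apdu_done(void *context, u8 *rsp, size_t rsp_len,
                                  int err)
    {
            /* err is -ETIME when the BWI timer in st_nci_se_wt_timeout()
             * expires without an answer from the secure element. */
            if (!err)
                    pr_info("eSE answered %zu bytes\n", rsp_len);
    }

    static int example_send_apdu(struct nci_dev *ndev, u8 *apdu, size_t len)
    {
            /* Only the eSE host index is accepted; st_nci_se_io() returns
             * -ENODEV for anything else. */
            return st_nci_se_io(ndev, ST_NCI_HCI_HOST_ID_ESE, apdu, len,
                                example_apdu_done, NULL);
    }
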
diff --git a/drivers/nfc/st21nfcb/Kconfig b/drivers/nfc/st21nfcb/Kconfig
deleted file mode 100644
index e0322dd03a70..000000000000
--- a/drivers/nfc/st21nfcb/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-config NFC_ST21NFCB
- tristate "STMicroelectronics ST21NFCB NFC driver"
- depends on NFC_NCI
- default n
- ---help---
- STMicroelectronics ST21NFCB core driver. It implements the chipset
- NCI logic and hooks into the NFC kernel APIs. Physical layers will
- register against it.
-
- To compile this driver as a module, choose m here. The module will
- be called st21nfcb.
- Say N if unsure.
-
-config NFC_ST21NFCB_I2C
- tristate "NFC ST21NFCB i2c support"
- depends on NFC_ST21NFCB && I2C
- ---help---
- This module adds support for the STMicroelectronics st21nfcb i2c interface.
- Select this if your platform is using the i2c bus.
-
- If you choose to build a module, it'll be called st21nfcb_i2c.
- Say N if unsure.
diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile
deleted file mode 100644
index ce659a9e5a1a..000000000000
--- a/drivers/nfc/st21nfcb/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for ST21NFCB NCI based NFC driver
-#
-
-st21nfcb_nci-objs = ndlc.o st21nfcb.o st21nfcb_se.o
-obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb_nci.o
-
-st21nfcb_i2c-objs = i2c.o
-obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o
diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c
deleted file mode 100644
index ca9871ab3fb3..000000000000
--- a/drivers/nfc/st21nfcb/st21nfcb.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * NCI based Driver for STMicroelectronics NFC Chip
- *
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/nfc.h>
-#include <net/nfc/nci.h>
-#include <net/nfc/nci_core.h>
-
-#include "st21nfcb.h"
-#include "st21nfcb_se.h"
-
-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
-
-#define ST21NFCB_NCI1_X_PROPRIETARY_ISO15693 0x83
-
-static int st21nfcb_nci_open(struct nci_dev *ndev)
-{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
- int r;
-
- if (test_and_set_bit(ST21NFCB_NCI_RUNNING, &info->flags))
- return 0;
-
- r = ndlc_open(info->ndlc);
- if (r)
- clear_bit(ST21NFCB_NCI_RUNNING, &info->flags);
-
- return r;
-}
-
-static int st21nfcb_nci_close(struct nci_dev *ndev)
-{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
- if (!test_and_clear_bit(ST21NFCB_NCI_RUNNING, &info->flags))
- return 0;
-
- ndlc_close(info->ndlc);
-
- return 0;
-}
-
-static int st21nfcb_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
-{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
- skb->dev = (void *)ndev;
-
- if (!test_bit(ST21NFCB_NCI_RUNNING, &info->flags))
- return -EBUSY;
-
- return ndlc_send(info->ndlc, skb);
-}
-
-static __u32 st21nfcb_nci_get_rfprotocol(struct nci_dev *ndev,
- __u8 rf_protocol)
-{
- return rf_protocol == ST21NFCB_NCI1_X_PROPRIETARY_ISO15693 ?
- NFC_PROTO_ISO15693_MASK : 0;
-}
-
-static struct nci_ops st21nfcb_nci_ops = {
- .open = st21nfcb_nci_open,
- .close = st21nfcb_nci_close,
- .send = st21nfcb_nci_send,
- .get_rfprotocol = st21nfcb_nci_get_rfprotocol,
- .discover_se = st21nfcb_nci_discover_se,
- .enable_se = st21nfcb_nci_enable_se,
- .disable_se = st21nfcb_nci_disable_se,
- .se_io = st21nfcb_nci_se_io,
- .hci_load_session = st21nfcb_hci_load_session,
- .hci_event_received = st21nfcb_hci_event_received,
- .hci_cmd_received = st21nfcb_hci_cmd_received,
-};
-
-int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
- int phy_tailroom)
-{
- struct st21nfcb_nci_info *info;
- int r;
- u32 protocols;
-
- info = devm_kzalloc(ndlc->dev,
- sizeof(struct st21nfcb_nci_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- protocols = NFC_PROTO_JEWEL_MASK
- | NFC_PROTO_MIFARE_MASK
- | NFC_PROTO_FELICA_MASK
- | NFC_PROTO_ISO14443_MASK
- | NFC_PROTO_ISO14443_B_MASK
- | NFC_PROTO_ISO15693_MASK
- | NFC_PROTO_NFC_DEP_MASK;
-
- ndlc->ndev = nci_allocate_device(&st21nfcb_nci_ops, protocols,
- phy_headroom, phy_tailroom);
- if (!ndlc->ndev) {
- pr_err("Cannot allocate nfc ndev\n");
- return -ENOMEM;
- }
- info->ndlc = ndlc;
-
- nci_set_drvdata(ndlc->ndev, info);
-
- r = nci_register_device(ndlc->ndev);
- if (r) {
- pr_err("Cannot register nfc device to nci core\n");
- nci_free_device(ndlc->ndev);
- return r;
- }
-
- return st21nfcb_se_init(ndlc->ndev);
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
-
-void st21nfcb_nci_remove(struct nci_dev *ndev)
-{
- struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
- nci_unregister_device(ndev);
- nci_free_device(ndev);
- kfree(info);
-}
-EXPORT_SYMBOL_GPL(st21nfcb_nci_remove);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.h b/drivers/nfc/st21nfcb/st21nfcb_se.h
deleted file mode 100644
index 52a323872bea..000000000000
--- a/drivers/nfc/st21nfcb/st21nfcb_se.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * NCI based Driver for STMicroelectronics NFC Chip
- *
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __LOCAL_ST21NFCB_SE_H_
-#define __LOCAL_ST21NFCB_SE_H_
-
-/*
- * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
- * sequence of at most 32 characters.
- */
-#define ST21NFCB_ESE_MAX_LENGTH 33
-#define ST21NFCB_HCI_HOST_ID_ESE 0xc0
-
-struct st21nfcb_se_info {
- u8 atr[ST21NFCB_ESE_MAX_LENGTH];
- struct completion req_completion;
-
- struct timer_list bwi_timer;
- int wt_timeout; /* in msecs */
- bool bwi_active;
-
- struct timer_list se_active_timer;
- bool se_active;
-
- bool xch_error;
-
- se_io_cb_t cb;
- void *cb_context;
-};
-
-int st21nfcb_se_init(struct nci_dev *ndev);
-void st21nfcb_se_deinit(struct nci_dev *ndev);
-
-int st21nfcb_nci_discover_se(struct nci_dev *ndev);
-int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
-int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
-int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
- u8 *apdu, size_t apdu_length,
- se_io_cb_t cb, void *cb_context);
-int st21nfcb_hci_load_session(struct nci_dev *ndev);
-void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
- u8 event, struct sk_buff *skb);
-void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
- struct sk_buff *skb);
-
-
-#endif /* __LOCAL_ST21NFCB_NCI_H_ */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index aa6a333b2ead..85b4d86772d8 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -149,6 +149,7 @@
*/
#define TRF7970A_QUIRK_IRQ_STATUS_READ BIT(0)
#define TRF7970A_QUIRK_EN2_MUST_STAY_LOW BIT(1)
+#define TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE BIT(2)
/* Direct commands */
#define TRF7970A_CMD_IDLE 0x00
@@ -446,6 +447,7 @@ struct trf7970a {
u8 md_rf_tech;
u8 tx_cmd;
bool issue_eof;
+ bool adjust_resp_len;
int en2_gpio;
int en_gpio;
struct mutex lock;
@@ -626,6 +628,11 @@ static void trf7970a_send_upstream(struct trf7970a *trf)
trf->aborting = false;
}
+ if (trf->adjust_resp_len) {
+ skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+ trf->adjust_resp_len = false;
+ }
+
trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
trf->rx_skb = NULL;
@@ -1429,10 +1436,15 @@ static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
trf->iso_ctrl = iso_ctrl;
}
- if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) &&
- trf7970a_is_iso15693_write_or_lock(req[1]) &&
- (req[0] & ISO15693_REQ_FLAG_OPTION))
- trf->issue_eof = true;
+ if (trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) {
+ if (trf7970a_is_iso15693_write_or_lock(req[1]) &&
+ (req[0] & ISO15693_REQ_FLAG_OPTION))
+ trf->issue_eof = true;
+ else if ((trf->quirks &
+ TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE) &&
+ (req[1] == ISO15693_CMD_READ_MULTIPLE_BLOCK))
+ trf->adjust_resp_len = true;
+ }
}
return 0;
@@ -1992,6 +2004,9 @@ static int trf7970a_probe(struct spi_device *spi)
return ret;
}
+ if (of_property_read_bool(np, "t5t-rmb-extra-byte-quirk"))
+ trf->quirks |= TRF7970A_QUIRK_T5T_RMB_EXTRA_BYTE;
+
if (of_property_read_bool(np, "irq-status-read-quirk"))
trf->quirks |= TRF7970A_QUIRK_IRQ_STATUS_READ;
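
The new quirk makes trf7970a_send_upstream() drop the spurious byte that
affected chips append to ISO/IEC 15693 Read Multiple Block responses, using
skb_trim(). A minimal sketch of the pattern (the length guard here is
illustrative; the driver trims unconditionally once adjust_resp_len is set):

    #include <linux/skbuff.h>

    /* Drop one spurious trailing byte, as the quirk handling above does. */
    static void example_drop_extra_byte(struct sk_buff *skb)
    {
            if (skb->len >= 1)
                    skb_trim(skb, skb->len - 1); /* shrinks data, fixes len */
    }
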
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 99764db0875a..f0650265febf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np)
return 0;
}
-static int __init of_init(void)
+void __init of_core_init(void)
{
struct device_node *np;
@@ -198,7 +198,8 @@ static int __init of_init(void)
of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
if (!of_kset) {
mutex_unlock(&of_mutex);
- return -ENOMEM;
+ pr_err("devicetree: failed to register existing nodes\n");
+ return;
}
for_each_of_allnodes(np)
__of_attach_node_sysfs(np);
@@ -207,10 +208,7 @@ static int __init of_init(void)
/* Symlink in /proc as required by userspace ABI */
if (of_root)
proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
-
- return 0;
}
-core_initcall(of_init);
static struct property *__of_find_property(const struct device_node *np,
const char *name, int *lenp)
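
of_init() is no longer a core_initcall; it becomes of_core_init() so the
devicetree kset is created at an explicitly ordered point during early boot
(device registration must find /sys/firmware/devicetree already populated).
The diffstat's two-line change to drivers/base/init.c suggests driver_init()
as the new caller; that placement is an assumption in this sketch:

    #include <linux/init.h>
    #include <linux/of.h>

    /* Sketch of the presumed explicit call site. */
    void __init example_driver_init(void)
    {
            /* ... core driver-model initialisation ... */
            of_core_init();     /* was: core_initcall(of_init) */
            /* ... bus and platform initialisation ... */
    }
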
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3351ef408125..53826b84e0ec 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
phandle = __of_get_property(np, "phandle", &sz);
if (!phandle)
phandle = __of_get_property(np, "linux,phandle", &sz);
- if (IS_ENABLED(PPC_PSERIES) && !phandle)
+ if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
phandle = __of_get_property(np, "ibm,phandle", &sz);
np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4fd0cacf7ca0..508cc56130e3 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head,
* consistent.
*/
if (add_align > dev_res->res->start) {
+ resource_size_t r_size = resource_size(dev_res->res);
+
dev_res->res->start = add_align;
- dev_res->res->end = add_align +
- resource_size(dev_res->res);
+ dev_res->res->end = add_align + r_size - 1;
list_for_each_entry(dev_res2, head, list) {
align = pci_resource_alignment(dev_res2->dev,
dev_res2->res);
- if (add_align > align)
+ if (add_align > align) {
list_move_tail(&dev_res->list,
&dev_res2->list);
+ break;
+ }
}
}
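
The resource fix above corrects an off-by-one: struct resource ranges are
inclusive on both ends, so resource_size(res) == res->end - res->start + 1,
and after moving start to add_align the end must be add_align + size - 1.
The old code grew every realigned resource by one byte. A worked example in
plain C:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0x1000, end = 0x1fff;
            unsigned long size = end - start + 1;   /* 0x1000 bytes */
            unsigned long add_align = 0x2000;

            /* old: end = add_align + size     -> 0x3000 (0x1001 bytes) */
            /* new: end = add_align + size - 1 -> 0x2fff (0x1000 bytes) */
            printf("realigned: [%#lx, %#lx], size %#lx\n",
                   add_align, add_align + size - 1, size);
            return 0;
    }
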
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a53bd5b52df9..fc9b9f0ea91e 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY
config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver"
depends on ARCH_OMAP2PLUS
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_PHY
help
Enable this for dm816x USB to work.
@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
- depends on USB_PHY
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_PHY
select OMAP_CONTROL_PHY
depends on OMAP_OCP2SCP
help
@@ -122,8 +125,9 @@ config TI_PIPE3
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
- depends on USB_PHY
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_PHY
help
Enable this to support the USB OTG transceiver on TWL4030
family chips (including the TWL5030 and TPS659x0 devices).
@@ -304,7 +308,7 @@ config PHY_STIH41X_USB
config PHY_QCOM_UFS
tristate "Qualcomm UFS PHY driver"
- depends on OF && ARCH_MSM
+ depends on OF && ARCH_QCOM
select GENERIC_PHY
help
Support for UFS PHY on QCOM chipsets.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 3791838f4bd4..63bc12d7a73e 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
{
struct phy *phy = phy_get(dev, string);
- if (PTR_ERR(phy) == -ENODEV)
+ if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
phy = NULL;
return phy;
@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
struct phy *phy = devm_phy_get(dev, string);
- if (PTR_ERR(phy) == -ENODEV)
+ if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
phy = NULL;
return phy;
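
phy_get() and devm_phy_get() return an ERR_PTR()-encoded value on failure,
and PTR_ERR() on a *valid* pointer merely reinterprets its address bits, so
the unguarded comparison could in principle match a real PHY. Checking
IS_ERR() first restricts the -ENODEV test to genuine error pointers. The
idiom, as a sketch:

    #include <linux/err.h>

    /* Sketch of the ERR_PTR idiom the two fixes above rely on. */
    static struct phy *example_optional(struct phy *phy)
    {
            /* IS_ERR() is true only for error encodings, so PTR_ERR()
             * is meaningful only once IS_ERR() has been checked. */
            if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
                    return NULL;  /* optional PHY: absence is not an error */
            return phy;
    }
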
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 183ef4368101..c1a468686bdc 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
+ pm_runtime_disable(phy->dev);
return PTR_ERR(phy->wkupclk);
} else {
dev_warn(&pdev->dev,
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 778276aba3aa..97d45f47d1ad 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -23,7 +23,7 @@
#define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80
#define USBHS_UGCTRL2 0x84
-#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */
+#define USBHS_UGSTS 0x88 /* From technical update */
/* Low Power Status register (LPSTS) */
#define USBHS_LPSTS_SUSPM 0x4000
@@ -41,7 +41,7 @@
#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030
/* USB General status register (UGSTS) */
-#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */
+#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */
#define PHYS_PER_CHANNEL 2
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index bcdb22d5e215..3c1850332a90 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -4,6 +4,7 @@
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
depends on ARCH_MEDIATEK
+ depends on RESET_CONTROLLER
select REGMAP
help
Say yes here to add support for MediaTek PMIC Wrapper found
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index db5be1eec54c..f432291feee9 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{
int ret;
- u32 val;
-
- val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
- if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
- pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret)
@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
int ret;
- u32 val;
-
- val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
- if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
- pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret)
@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
*rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
return 0;
}
@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
{
- unsigned long rate_spi;
- int ck_mhz;
-
- rate_spi = clk_get_rate(wrp->clk_spi);
-
- if (rate_spi > 26000000)
- ck_mhz = 26;
- else if (rate_spi > 18000000)
- ck_mhz = 18;
- else
- ck_mhz = 0;
-
- switch (ck_mhz) {
- case 18:
- if (pwrap_is_mt8135(wrp))
- pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
- break;
- case 26:
- if (pwrap_is_mt8135(wrp))
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ if (pwrap_is_mt8135(wrp)) {
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
- break;
- case 0:
- if (pwrap_is_mt8135(wrp))
- pwrap_writel(wrp, 0xf, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
- break;
- default:
- return -EINVAL;
+ } else {
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
}
return 0;
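
The reordering in pwrap_read() matters: clearing VLDCLR from a pre-check of
the FSM state could acknowledge a result before RDATA had been sampled,
racing with the wrapper's state machine. The enforced sequence is now
wait-idle, issue, wait-valid, sample RDATA, then acknowledge. A sketch of
that sequence -- pwrap_is_fsm_vldclr and the WACS2 command encoding are
assumed from the rest of the driver and are not shown in this hunk:

    /* Sketch only; relies on the driver's internal helpers. */
    static int example_pwrap_read(struct pmic_wrapper *wrp, u32 adr,
                                  u32 *rdata)
    {
            int ret;

            ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);   /* 1: idle */
            if (ret)
                    return ret;
            pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD); /* 2: issue */
            ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); /* 3: valid */
            if (ret)
                    return ret;
            *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
            pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);             /* 4: ack */
            return 0;
    }
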
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index 5ff4716b72c3..784b5ecfa849 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
/*
* Context: softirq
*/
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
- int length, int offset, int total_size)
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
+ u8 length, u16 offset, u16 total_size)
{
struct oz_port *port = hport;
struct urb *urb;
@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
if (!urb)
return;
if (status == 0) {
- int copy_len;
- int required_size = urb->transfer_buffer_length;
+ unsigned int copy_len;
+ unsigned int required_size = urb->transfer_buffer_length;
if (required_size > total_size)
required_size = total_size;
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 4249fa374012..d2a6085345be 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
/* Confirmation functions.
*/
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
- const u8 *desc, int length, int offset, int total_size);
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
+ const u8 *desc, u8 length, u16 offset, u16 total_size);
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
const u8 *data, int data_len);
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index d434d8c6fff6..f660bb198c65 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_multiple_fixed *body =
(struct oz_multiple_fixed *)data_hdr;
u8 *data = body->data;
- int n = (len - sizeof(struct oz_multiple_fixed)+1)
+ unsigned int n;
+ if (!body->unit_size ||
+ len < sizeof(struct oz_multiple_fixed) - 1)
+ break;
+ n = (len - (sizeof(struct oz_multiple_fixed) - 1))
/ body->unit_size;
while (n--) {
oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
case OZ_GET_DESC_RSP: {
struct oz_get_desc_rsp *body =
(struct oz_get_desc_rsp *)usb_hdr;
- int data_len = elt->length -
- sizeof(struct oz_get_desc_rsp) + 1;
- u16 offs = le16_to_cpu(get_unaligned(&body->offset));
- u16 total_size =
+ u16 offs, total_size;
+ u8 data_len;
+
+ if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
+ break;
+ data_len = elt->length -
+ (sizeof(struct oz_get_desc_rsp) - 1);
+ offs = le16_to_cpu(get_unaligned(&body->offset));
+ total_size =
le16_to_cpu(get_unaligned(&body->total_size));
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
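
Both ozwpan hunks fix the same bug class: expressions such as
elt->length - sizeof(struct oz_get_desc_rsp) + 1 are evaluated in an
unsigned type, so a short, attacker-controlled element underflows to a huge
length or iteration count instead of going negative. The fix bounds-checks
before subtracting. A small demonstration of the underflow:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t hdr = 8;     /* stand-in for sizeof(struct oz_...) */
            size_t len = 3;     /* attacker-supplied element length   */

            size_t n = len - hdr + 1;   /* old pattern: wraps around  */
            printf("unchecked: n = %zu\n", n);

            if (len < hdr - 1)          /* new pattern: reject first  */
                    puts("checked: short element rejected");
            return 0;
    }
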
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index f1d47a0676c3..ada8d5dafd49 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedLinkBlinkInProgress = true;
@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
pLed->bLedBlinkInProgress = true;
@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
else
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
mod_timer(&pLed->BlinkTimer,
@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
return;
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->CurrLedState = LED_ON;
pLed->BlinkingLedState = LED_ON;
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
mod_timer(&pLed->BlinkTimer,
@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->CurrLedState = LED_ON;
pLed->BlinkingLedState = LED_ON;
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
mod_timer(&pLed->BlinkTimer,
@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
} else
pLed->bLedWPSBlinkInProgress = true;
@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->CurrLedState = LED_OFF;
@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
mod_timer(&pLed->BlinkTimer,
@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
case LED_CTL_START_TO_LINK:
if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
+ del_timer(&pLed1->BlinkTimer);
pLed1->BlinkingLedState = LED_OFF;
pLed1->CurrLedState = LED_OFF;
if (pLed1->bLedOn)
@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
pLed->bLedStartToLinkBlinkInProgress = true;
@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (LedAction == LED_CTL_LINK) {
if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
+ del_timer(&pLed1->BlinkTimer);
pLed1->BlinkingLedState = LED_OFF;
pLed1->CurrLedState = LED_OFF;
if (pLed1->bLedOn)
@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
pLed->bLedBlinkInProgress = true;
@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON:
if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&(pLed1->BlinkTimer));
+ del_timer(&pLed1->BlinkTimer);
pLed1->BlinkingLedState = LED_OFF;
pLed1->CurrLedState = LED_OFF;
if (pLed1->bLedOn)
@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
}
if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS: /*WPS connect success*/
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
/*LED1 settings*/
if (pLed1->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed1->BlinkTimer);
+ del_timer(&pLed1->BlinkTimer);
else
pLed1->bLedWPSBlinkInProgress = true;
pLed1->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
break;
case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
/*LED1 settings*/
if (pLed1->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed1->BlinkTimer);
+ del_timer(&pLed1->BlinkTimer);
else
pLed1->bLedWPSBlinkInProgress = true;
pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedStartToLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedStartToLinkBlinkInProgress = false;
}
if (pLed1->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed1->BlinkTimer);
+ del_timer(&pLed1->BlinkTimer);
pLed1->bLedWPSBlinkInProgress = false;
}
pLed1->BlinkingLedState = LED_UNKNOWN;
@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
; /* dummy branch */
else if (pLed->bLedScanBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
SwLedOff(padapter, pLed);
@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
case LED_CTL_STOP_WPS_FAIL:
case LED_CTL_STOP_WPS:
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->CurrLedState = LED_ON;
@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
+ del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
SwLedOff(padapter, pLed);
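
Every del_timer_sync() converted above can run while holding a lock that the
BlinkTimer callback also takes; del_timer_sync() waits for a running callback
to finish, so that ordering can deadlock, whereas del_timer() only
deactivates a pending timer and never waits (at the cost of letting an
already-running callback complete). A sketch of the hazard, with
hypothetical names:

    #include <linux/timer.h>
    #include <linux/spinlock.h>

    struct led_ctx {
            spinlock_t lock;
            struct timer_list timer;
    };

    static void example_blink_cb(unsigned long data)
    {
            struct led_ctx *ctx = (struct led_ctx *)data;

            spin_lock(&ctx->lock);          /* callback takes the lock */
            /* ... update blink state ... */
            spin_unlock(&ctx->lock);
    }

    static void example_stop(struct led_ctx *ctx)
    {
            spin_lock(&ctx->lock);
            /* del_timer_sync() here would spin waiting for
             * example_blink_cb(), which is spinning on ctx->lock. */
            del_timer(&ctx->timer);         /* safe: no waiting */
            spin_unlock(&ctx->lock);
    }
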
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 1a1c38f885d6..e35854d28f90 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
if (pcmd->res != H2C_SUCCESS)
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
- del_timer_sync(&pmlmepriv->assoc_timer);
+ del_timer(&pmlmepriv->assoc_timer);
#ifdef __BIG_ENDIAN
/* endian_convert */
pnetwork->Length = le32_to_cpu(pnetwork->Length);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index fb2b195b90af..c044b0e55ba9 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
- del_timer_sync(&pmlmepriv->scan_to_timer);
+ del_timer(&pmlmepriv->scan_to_timer);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
}
@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter)
}
if (padapter->pwrctrlpriv.pwr_mode !=
padapter->registrypriv.power_mgnt) {
- del_timer_sync(&pmlmepriv->dhcp_timer);
+ del_timer(&pmlmepriv->dhcp_timer);
r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
padapter->registrypriv.smart_ps);
}
@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
== true)
r8712_indicate_connect(adapter);
- del_timer_sync(&pmlmepriv->assoc_timer);
+ del_timer(&pmlmepriv->assoc_timer);
} else
goto ignore_joinbss_callback;
} else {
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index aaa584435c87..9bc04f474d18 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
return;
- del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer);
+ del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
_enter_pwrlock(&pwrpriv->lock);
pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
if (pwrpriv->cpwm >= PS_STATE_S2) {
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 7bb96c47f188..a9b93d0f6f56 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
* cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer(&preorder_ctrl->reordering_ctrl_timer);
}
spin_lock(&(pfree_sta_queue->lock));
/* insert into free_sta_queue; 20061114 */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ecd7c0f82481..458bc340aece 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1793,10 +1793,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(priv->hw, SIGNAL_DBM);
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
priv->hw->max_signal = 100;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0d97b6457ead..2ccfbff37c42 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -978,10 +978,10 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(priv->hw, SIGNAL_DBM);
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
priv->hw->max_signal = 100;
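Both vt6655 and vt6656 stop assigning a flag mask to hw->flags and use
mac80211's ieee80211_hw_set() instead, which pastes the IEEE80211_HW_ prefix
onto the flag name and sets one bit in a bitmap (queried later with
ieee80211_hw_check()). A sketch of the same token-pasting bitmap idiom for a
hypothetical driver, independent of mac80211:

	#include <linux/bitops.h>

	enum mydev_flags {			/* hypothetical capability bits */
		MYDEV_HAS_DMA,
		MYDEV_HAS_WAKEUP,
		NUM_MYDEV_FLAGS
	};

	struct mydev {
		unsigned long flags[BITS_TO_LONGS(NUM_MYDEV_FLAGS)];
	};

	/* helpers in the style of ieee80211_hw_set()/ieee80211_hw_check() */
	#define mydev_set(dev, flg)	__set_bit(MYDEV_##flg, (dev)->flags)
	#define mydev_check(dev, flg)	test_bit(MYDEV_##flg, (dev)->flags)

	static void mydev_probe_caps(struct mydev *dev)
	{
		/* instead of OR-ing mask bits into a single flags word */
		mydev_set(dev, HAS_DMA);
		mydev_set(dev, HAS_WAKEUP);
	}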
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cc57a3a6b02b..396344cb011f 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
+static inline int tty_copy_to_user(struct tty_struct *tty,
+ void __user *to,
+ const void *from,
+ unsigned long n)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ tty_audit_add_data(tty, to, n, ldata->icanon);
+ return copy_to_user(to, from, n);
+}
+
/**
* n_tty_kick_worker - start input worker (if required)
* @tty: terminal
@@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
size = N_TTY_BUF_SIZE - tail;
n = eol - tail;
- if (n > 4096)
- n += 4096;
+ if (n > N_TTY_BUF_SIZE)
+ n += N_TTY_BUF_SIZE;
n += found;
c = n;
@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
__func__, eol, found, n, c, size, more);
if (n > size) {
- ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
+ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
if (ret)
return -EFAULT;
- ret = copy_to_user(*b + size, ldata->read_buf, n - size);
+ ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
} else
- ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
+ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
if (ret)
return -EFAULT;
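The n_tty change has two parts: canonical-mode reads now go through
tty_copy_to_user(), so data copied to userspace is also fed to the tty audit
code (previously the two-chunk wrap-around path bypassed auditing), and the
hard-coded 4096 in the wrap-around correction is replaced by the
N_TTY_BUF_SIZE constant it stood for. The wrap-around copy itself follows
the usual ring-buffer split; a self-contained sketch of that structure
(names hypothetical, buf_size assumed a power of two):

	#include <linux/uaccess.h>

	/*
	 * Copy n bytes that may wrap past the end of a power-of-two ring
	 * buffer, mirroring the shape of canon_copy_from_read_buf() above.
	 */
	static int ring_copy_to_user(void __user *to, const char *ring,
				     size_t buf_size, size_t tail, size_t n)
	{
		size_t off = tail & (buf_size - 1);
		size_t size = buf_size - off;	/* bytes until the wrap point */

		if (n > size) {
			/* first chunk: from tail to the end of the buffer */
			if (copy_to_user(to, ring + off, size))
				return -EFAULT;
			/* second chunk: the remainder from the buffer start */
			if (copy_to_user((char __user *)to + size, ring,
					 n - size))
				return -EFAULT;
			return 0;
		}
		return copy_to_user(to, ring + off, n) ? -EFAULT : 0;
	}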
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 9289999cb7c6..dce1a23706e8 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id)
return IRQ_NONE;
}
+#ifdef CONFIG_SERIAL_8250_DMA
+static int omap_8250_dma_handle_irq(struct uart_port *port);
+#endif
+
+static irqreturn_t omap8250_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int iir;
+ int ret;
+
+#ifdef CONFIG_SERIAL_8250_DMA
+ if (up->dma) {
+ ret = omap_8250_dma_handle_irq(port);
+ return IRQ_RETVAL(ret);
+ }
+#endif
+
+ serial8250_rpm_get(up);
+ iir = serial_port_in(port, UART_IIR);
+ ret = serial8250_handle_irq(port, iir);
+ serial8250_rpm_put(up);
+
+ return IRQ_RETVAL(ret);
+}
+
static int omap_8250_startup(struct uart_port *port)
{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
+ struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = port->private_data;
-
int ret;
if (priv->wakeirq) {
@@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port)
pm_runtime_get_sync(port->dev);
- ret = serial8250_do_startup(port);
- if (ret)
+ up->mcr = 0;
+ serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+ up->lsr_saved_flags = 0;
+ up->msr_saved_flags = 0;
+
+ if (up->dma) {
+ ret = serial8250_request_dma(up);
+ if (ret) {
+ dev_warn_ratelimited(port->dev,
+ "failed to request DMA\n");
+ up->dma = NULL;
+ }
+ }
+
+ ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
+ dev_name(port->dev), port);
+ if (ret < 0)
goto err;
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+
#ifdef CONFIG_PM
up->capabilities |= UART_CAP_RPM;
#endif
@@ -610,8 +655,7 @@ err:
static void omap_8250_shutdown(struct uart_port *port)
{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
+ struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = port->private_data;
flush_work(&priv->qos_work);
@@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port)
pm_runtime_get_sync(port->dev);
serial_out(up, UART_OMAP_WER, 0);
- serial8250_do_shutdown(port);
+
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+ if (up->dma)
+ serial8250_release_dma(up);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ if (up->lcr & UART_LCR_SBC)
+ serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC);
+ serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
+ free_irq(port->irq, port);
if (priv->wakeirq)
free_irq(priv->wakeirq, port);
}
@@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
}
#endif
+static int omap8250_no_handle_irq(struct uart_port *port)
+{
+ /* IRQ has not been requested but handling irq? */
Note: replace the comment above in context — the interrupt arrived before request_irq(), i.e. before port startup.
+ WARN_ONCE(1, "Unexpected irq handling before port startup\n");
+ return 0;
+}
+
static int omap8250_probe(struct platform_device *pdev)
{
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(&up, priv);
+ up.port.handle_irq = omap8250_no_handle_irq;
#ifdef CONFIG_SERIAL_8250_DMA
if (pdev->dev.of_node) {
/*
@@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev)
ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
if (ret == 2) {
up.dma = &priv->omap8250_dma;
- up.port.handle_irq = omap_8250_dma_handle_irq;
priv->omap8250_dma.fn = the_no_dma_filter_fn;
priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
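The omap rework moves IRQ ownership into the driver: omap8250_irq() decides
per interrupt whether the DMA or the generic 8250 path handles it, and
omap8250_no_handle_irq() is installed as port->handle_irq so an interrupt
arriving before startup triggers a warning instead of touching
uninitialized state. The dispatcher shape, reduced to a hypothetical
device:

	#include <linux/interrupt.h>

	struct mydev {
		struct dma_state *dma;		/* NULL when running PIO */
	};

	static int mydev_dma_handle_irq(struct mydev *dev);	/* hypothetical */
	static int mydev_pio_handle_irq(struct mydev *dev);	/* hypothetical */

	static irqreturn_t mydev_irq(int irq, void *dev_id)
	{
		struct mydev *dev = dev_id;
		int handled;

		if (dev->dma)
			handled = mydev_dma_handle_irq(dev);
		else
			handled = mydev_pio_handle_irq(dev);

		/*
		 * IRQ_RETVAL() maps 0 to IRQ_NONE and nonzero to
		 * IRQ_HANDLED; on a shared line this lets the core spot
		 * spurious interrupt sources.
		 */
		return IRQ_RETVAL(handled);
	}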
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 6f5a0720a8c8..763eb20fe321 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock)
/*
* Transmit a character
- * There must be at least one free entry in the TX FIFO to accept the char.
*
- * Returns true if the FIFO might have space in it afterwards;
- * returns false if the FIFO definitely became full.
+ * Returns true if the character was successfully queued to the FIFO.
+ * Returns false otherwise.
*/
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
{
+ if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+ return false; /* unable to transmit character */
+
writew(c, uap->port.membase + UART01x_DR);
uap->port.icount.tx++;
- if (likely(uap->tx_irq_seen > 1))
- return true;
-
- return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
+ return true;
}
static bool pl011_tx_chars(struct uart_amba_port *uap)
@@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap)
return false;
if (uap->port.x_char) {
- pl011_tx_char(uap, uap->port.x_char);
+ if (!pl011_tx_char(uap, uap->port.x_char))
+ goto done;
uap->port.x_char = 0;
--count;
}
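With the FIFO-full test moved into pl011_tx_char(), the return value now
means "character actually queued", and callers such as the x_char path bail
out as soon as the FIFO fills instead of inferring fullness from a stale
flag read. The check-before-write shape, with placeholder register names:

	#include <linux/io.h>

	#define REG_FR		0x18		/* placeholder flag register */
	#define REG_DR		0x00		/* placeholder data register */
	#define FR_TXFF		(1 << 5)	/* placeholder TX-FIFO-full bit */

	static bool fifo_tx_char(void __iomem *base, unsigned char c)
	{
		if (readw(base + REG_FR) & FR_TXFF)
			return false;	/* FIFO full: caller retries later */
		writew(c, base + REG_DR);
		return true;		/* character is in the FIFO */
	}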
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index c8cfa0637128..88250395b0ce 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -911,6 +911,14 @@ static void dma_rx_callback(void *data)
status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
count = RX_BUF_SIZE - state.residue;
+
+ if (readl(sport->port.membase + USR2) & USR2_IDLE) {
+ /* In condition [3] the SDMA counted up too early */
+ count--;
+
+ writel(USR2_IDLE, sport->port.membase + USR2);
+ }
+
dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
if (count) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index fdab715a0631..c0eafa6fd403 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -339,7 +339,7 @@
#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
-#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1)
+#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
#define DWC3_DGCMD_CMDACT (1 << 10)
#define DWC3_DGCMD_CMDIOC (1 << 8)
@@ -355,7 +355,7 @@
#define DWC3_DEPCMD_PARAM_SHIFT 16
#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
-#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1)
+#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
#define DWC3_DEPCMD_CMDACT (1 << 10)
#define DWC3_DEPCMD_CMDIOC (1 << 8)
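Both status macros previously sampled only bit 15, but as the new macros
read it the command-status value is a 4-bit field at bits 15:12, so only
completion codes with the top field bit set (8-15) ever registered as
failures. A worked example of the difference:

	#define DGCMD_STATUS_OLD(n)	(((n) >> 15) & 1)	/* bit 15 only */
	#define DGCMD_STATUS_NEW(n)	(((n) >> 12) & 0x0f)	/* bits 15:12 */

	/*
	 * For a completion code of 2 the register field reads as 0x2000:
	 *   DGCMD_STATUS_OLD(0x2000) == 0  -> failure reported as success
	 *   DGCMD_STATUS_NEW(0x2000) == 2  -> failure visible to the driver
	 */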
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6bdb57069044..3507f880eb74 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
return ret;
}
- set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
return len;
}
break;
@@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ret = ep->status;
if (io_data->read && ret > 0) {
ret = copy_to_iter(data, ret, &io_data->data);
- if (unlikely(iov_iter_count(&io_data->data)))
+ if (!ret)
ret = -EFAULT;
}
}
@@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
- if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
- ffs_closed(ffs);
+ ffs_closed(ffs);
BUG_ON(ffs->gadget);
@@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs)
ffs_obj->desc_ready = true;
ffs_obj->ffs_data = ffs;
- if (ffs_obj->ffs_ready_callback)
+ if (ffs_obj->ffs_ready_callback) {
ret = ffs_obj->ffs_ready_callback(ffs);
+ if (ret)
+ goto done;
+ }
+ set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
ffs_dev_unlock();
return ret;
@@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs)
ffs_obj->desc_ready = false;
- if (ffs_obj->ffs_closed_callback)
+ if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
+ ffs_obj->ffs_closed_callback)
ffs_obj->ffs_closed_callback(ffs);
if (!ffs_obj->opts || ffs_obj->opts->no_configfs
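Taken together, the f_fs hunks make the closed callback fire exactly once,
and only if the ready callback succeeded: the flag is set at the end of a
successful ffs_ready() instead of in ep0_write(), and consumed with an
atomic test_and_clear_bit() in ffs_closed(). The once-only teardown idiom,
on a hypothetical device:

	#include <linux/bitops.h>

	#define MYDEV_FL_NOTIFY_CLOSED	0	/* hypothetical flag bit */

	struct mydev {
		unsigned long flags;
		int (*ready_cb)(struct mydev *dev);
		void (*closed_cb)(struct mydev *dev);
	};

	static int mydev_ready(struct mydev *dev)
	{
		if (dev->ready_cb) {
			int ret = dev->ready_cb(dev);

			if (ret)
				return ret;	/* not ready: never notify */
		}
		set_bit(MYDEV_FL_NOTIFY_CLOSED, &dev->flags);
		return 0;
	}

	static void mydev_closed(struct mydev *dev)
	{
		/* atomic test-and-clear: at most one caller sees the bit
		 * set, so the callback cannot run twice */
		if (test_and_clear_bit(MYDEV_FL_NOTIFY_CLOSED, &dev->flags) &&
		    dev->closed_cb)
			dev->closed_cb(dev);
	}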
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 259b656c0b3e..6316aa5b1c49 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
int result;
mutex_lock(&opts->lock);
- result = strlcpy(page, opts->id, PAGE_SIZE);
+ if (opts->id) {
+ result = strlcpy(page, opts->id, PAGE_SIZE);
+ } else {
+ page[0] = 0;
+ result = 0;
+ }
+
mutex_unlock(&opts->lock);
return result;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 9719abfb6145..7856b3394494 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (intf == 1) {
if (alt == 1) {
- config_ep_by_speed(cdev->gadget, f, out_ep);
+ err = config_ep_by_speed(cdev->gadget, f, out_ep);
+ if (err)
+ return err;
+
usb_ep_enable(out_ep);
out_ep->driver_data = audio;
audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 7b9ef7e257d2..e821931c965c 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
gfs_registered = true;
ret = usb_composite_probe(&gfs_driver);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ ++missing_funcs;
gfs_registered = false;
+ }
return ret;
}
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index b808951491cc..99fd9a5667df 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- s3c2410_udc_set_pullup(udc, is_on ? 0 : 1);
+ s3c2410_udc_set_pullup(udc, is_on);
return 0;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec8ac1674854..36bf089b708f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned long flags;
- int ret;
+ int ret, slot_id;
struct xhci_command *command;
command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return 0;
+ /* xhci->slot_id and xhci->addr_dev are not thread-safe */
+ mutex_lock(&xhci->mutex);
spin_lock_irqsave(&xhci->lock, flags);
command->completion = &xhci->addr_dev;
ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
+ mutex_unlock(&xhci->mutex);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
kfree(command);
return 0;
@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(command->completion);
+ slot_id = xhci->slot_id;
+ mutex_unlock(&xhci->mutex);
- if (!xhci->slot_id || command->status != COMP_SUCCESS) {
+ if (!slot_id || command->status != COMP_SUCCESS) {
xhci_err(xhci, "Error while assigning device slot ID\n");
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
HCS_MAX_SLOTS(
@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
- if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
+ if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
goto disable_slot;
}
- udev->slot_id = xhci->slot_id;
+ udev->slot_id = slot_id;
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u64 temp_64;
- struct xhci_command *command;
+ struct xhci_command *command = NULL;
+
+ mutex_lock(&xhci->mutex);
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Bad Slot ID %d", udev->slot_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
virt_dev = xhci->devs[udev->slot_id];
@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
*/
xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
udev->slot_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (setup == SETUP_CONTEXT_ONLY) {
@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
SLOT_STATE_DEFAULT) {
xhci_dbg(xhci, "Slot already in default state\n");
- return 0;
+ goto out;
}
}
command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
- if (!command)
- return -ENOMEM;
+ if (!command) {
+ ret = -ENOMEM;
+ goto out;
+ }
command->in_ctx = virt_dev->in_ctx;
command->completion = &xhci->addr_dev;
@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
- kfree(command);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
* If this is the first Set Address since device plug-in or
@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"FIXME: allocate a command ring segment");
- kfree(command);
- return ret;
+ goto out;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
ret = -EINVAL;
break;
}
- if (ret) {
- kfree(command);
- return ret;
- }
+ if (ret)
+ goto out;
temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Internal device address = %d",
le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
+out:
+ mutex_unlock(&xhci->mutex);
kfree(command);
- return 0;
+ return ret;
}
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return 0;
}
+ mutex_init(&xhci->mutex);
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
return 0;
}
+
+/*
+ * If an init function is provided, an exit function must also be provided
+ * to allow module unload.
+ */
+static void __exit xhci_hcd_fini(void) { }
+
module_init(xhci_hcd_init);
+module_exit(xhci_hcd_fini);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea75e8ccd3c1..6977f8491fa7 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1497,6 +1497,8 @@ struct xhci_hcd {
struct list_head lpm_failed_devs;
/* slot enabling and address device helpers */
+ /* these are not thread-safe; protect them with the mutex below */
+ struct mutex mutex;
struct completion addr_dev;
int slot_id;
/* For USB 3.0 LPM enable/disable. */
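xhci->slot_id and the addr_dev completion form a single shared "mailbox"
between xhci_alloc_dev() and xhci_setup_device(); the new mutex serializes
whole transactions so one enumeration cannot consume another's result. The
pattern in isolation (sketch, hypothetical types; lock and completion are
assumed initialized with mutex_init()/init_completion() at setup):

	#include <linux/completion.h>
	#include <linux/mutex.h>

	struct mailbox {
		struct mutex lock;
		struct completion done;
		int result;		/* filled by the completion path */
	};

	static int mailbox_transaction(struct mailbox *mb)
	{
		int result;

		mutex_lock(&mb->lock);	/* one transaction at a time */
		reinit_completion(&mb->done);
		/* ... start the operation that eventually writes
		 * mb->result and calls complete(&mb->done) ... */
		wait_for_completion(&mb->done);
		result = mb->result;	/* snapshot while still locked */
		mutex_unlock(&mb->lock);

		return result;		/* later callers cannot clobber it */
	}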
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 3789b08ef67b..6dca3d794ced 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (musb->ops->quirks)
musb->io.quirks = musb->ops->quirks;
- /* At least tusb6010 has it's own offsets.. */
- if (musb->ops->ep_offset)
- musb->io.ep_offset = musb->ops->ep_offset;
- if (musb->ops->ep_select)
- musb->io.ep_select = musb->ops->ep_select;
-
- /* ..and some devices use indexed offset or flat offset */
+ /* Most devices use indexed offset or flat offset */
if (musb->io.quirks & MUSB_INDEXED_EP) {
musb->io.ep_offset = musb_indexed_ep_offset;
musb->io.ep_select = musb_indexed_ep_select;
@@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb->io.ep_select = musb_flat_ep_select;
}
+ /* At least tusb6010 has its own offsets */
+ if (musb->ops->ep_offset)
+ musb->io.ep_offset = musb->ops->ep_offset;
+ if (musb->ops->ep_select)
+ musb->io.ep_select = musb->ops->ep_select;
+
if (musb->ops->fifo_mode)
fifo_mode = musb->ops->fifo_mode;
else
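The musb hunks only reorder the assignments: the generic indexed/flat
helpers are installed first and the platform's own ep_offset/ep_select
hooks (tusb6010) last, so the specific hooks override the defaults instead
of being overwritten by them. The defaults-then-overrides shape, made
generic (hypothetical types):

	struct platform_ops {
		void (*ep_hook)(void);		/* optional override */
	};

	static void generic_ep_hook(void) { }

	static void install_ep_hook(void (**slot)(void),
				    const struct platform_ops *ops)
	{
		*slot = generic_ep_hook;	/* generic default first */
		if (ops->ep_hook)
			*slot = ops->ep_hook;	/* specific override wins */
	}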
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 7225d526df04..03ab0c699f74 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
}
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_link_status_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
+ IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
"usb-link-status", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for link status irq\n");
@@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
}
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_disconnect_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
+ IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
"usb-id-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for ID fall irq\n");
@@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
}
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_disconnect_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
+ IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
"usb-vbus-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 845f658276b1..2b28443d07b9 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, tu);
tu->irq = platform_get_irq(pdev, 0);
- ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
+ ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
+ IRQF_ONESHOT,
"tahvo-vbus", tu);
if (ret) {
dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
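All of the threaded-IRQ requests above pass a NULL primary handler, and the
genirq core refuses such threaded-only requests without IRQF_ONESHOT,
because the line would otherwise be unmasked while the thread runs, which
can storm on level-triggered interrupts. The required shape (sketch,
mydev_thread_fn and priv hypothetical):

	#include <linux/interrupt.h>

	static irqreturn_t mydev_thread_fn(int irq, void *priv);

	static int mydev_setup_irq(struct device *dev, int irq, void *priv)
	{
		/*
		 * NULL primary handler: the core keeps the line masked
		 * until mydev_thread_fn() returns. IRQF_ONESHOT requests
		 * exactly that guarantee; without it this call fails
		 * with -EINVAL.
		 */
		return devm_request_threaded_irq(dev, irq, NULL,
						 mydev_thread_fn,
						 IRQF_ONESHOT,
						 "mydev", priv);
	}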
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8597cf9cfceb..c0f5c652d272 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
if (usbhs_pipe_is_busy(pipe))
return 0;
@@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
usbhs_pipe_data_sequence(pipe, pkt->sequence);
pkt->sequence = -1; /* -1 sequence will be ignored */
+ if (usbhs_pipe_is_dcp(pipe))
+ usbhsf_fifo_clear(pipe, fifo);
+
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
usbhs_pipe_enable(pipe);
usbhs_pipe_running(pipe, 1);
@@ -673,7 +678,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
*is_done = 1;
usbhsf_rx_irq_ctrl(pipe, 0);
usbhs_pipe_running(pipe, 0);
- usbhs_pipe_disable(pipe); /* disable pipe first */
+ /*
+ * In function (gadget) mode, the controller may enter the
+ * Control Write status stage at this point, so the driver
+ * must not disable the pipe here; otherwise the controller
+ * cannot complete the status stage.
+ */
+ if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_disable(pipe); /* disable pipe first */
}
/*
@@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
{
char name[16];
- snprintf(name, sizeof(name), "tx%d", channel);
- fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
- if (IS_ERR(fifo->tx_chan))
- fifo->tx_chan = NULL;
-
- snprintf(name, sizeof(name), "rx%d", channel);
- fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
- if (IS_ERR(fifo->rx_chan))
- fifo->rx_chan = NULL;
+ /*
+ * To avoid complex handling of the DnFIFOs, the driver uses each
+ * DnFIFO in a single direction (TX or RX, not bi-directional),
+ * so odd channels are used for TX and even channels for RX.
+ */
+ snprintf(name, sizeof(name), "ch%d", channel);
+ if (channel & 1) {
+ fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+ if (IS_ERR(fifo->tx_chan))
+ fifo->tx_chan = NULL;
+ } else {
+ fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+ if (IS_ERR(fifo->rx_chan))
+ fifo->rx_chan = NULL;
+ }
}
static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
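dma_request_slave_channel_reason() returns an ERR_PTR() rather than NULL on
failure, which lets a caller treat a missing channel as "fall back to PIO"
instead of failing the probe; here each "chN" channel is additionally bound
to one direction by its parity. The optional-DMA request pattern (sketch;
the "ch0" name follows the hunk above):

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static struct dma_chan *optional_dma_chan(struct device *dev)
	{
		struct dma_chan *chan;

		chan = dma_request_slave_channel_reason(dev, "ch0");
		if (IS_ERR(chan))
			chan = NULL;	/* no DMA: driver falls back to PIO */
		return chan;
	}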
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 9031750e7404..ffd739e31bfc 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8eb68a31cab6..4c8b3b82103d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e4f46f3c89c..792e054126de 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -155,6 +155,7 @@
#define XSENS_AWINDA_STATION_PID 0x0101
#define XSENS_AWINDA_DONGLE_PID 0x0102
#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
+#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
/* Xsens devices using FTDI VID */
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index e894eb278d83..eba1b7ac7294 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (cpu == -1)
irq_set_affinity_hint(irq, NULL);
else {
+ cpumask_clear(mask);
cpumask_set_cpu(cpu, mask);
irq_set_affinity_hint(irq, mask);
}
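The virtio fix addresses mask reuse: the per-vector cpumask lives across
calls, so without cpumask_clear() every CPU ever requested stays set and
the affinity hint grows to cover them all. Building a single-CPU hint from
a long-lived mask therefore always clears first:

	#include <linux/cpumask.h>
	#include <linux/interrupt.h>

	static void set_single_cpu_hint(unsigned int irq, int cpu,
					struct cpumask *mask)
	{
		cpumask_clear(mask);		/* drop bits from earlier calls */
		cpumask_set_cpu(cpu, mask);	/* exactly one CPU remains set */
		irq_set_affinity_hint(irq, mask);
	}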