Diffstat (limited to 'drivers/net/ethernet/chelsio')
27 files changed, 2447 insertions, 664 deletions
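Much of this change is code motion: the cudbg indirect-register tables (the t5/t6 *_tp_pio_array, *_ma_ireg_array, *_up_cim_reg_array entries, and friends) move from cudbg_entity.h into cudbg_lib.c, and cxgb4_get_entity_length() moves from cxgb4_cudbg.c into cudbg_lib.c as cudbg_get_entity_length(), which sizes several entities with expressions like sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32)) and therefore needs the array definitions in the same translation unit. Each IREG_NUM_ELEM row encodes {address register, data register, first index, register count} for one indirect-register window. A minimal standalone sketch of how such a table is walked, with hypothetical mmio_read32()/mmio_write32() stubs standing in for the driver's t4_read_reg()/t4_write_reg():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IREG_NUM_ELEM 4

/* Hypothetical MMIO accessors; the real driver uses t4_read_reg()/t4_write_reg(). */
static uint32_t mmio_read32(uint32_t reg) { return reg; /* stub */ }
static void mmio_write32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

/* Each row: {address register, data register, first index, register count}. */
static const uint32_t tp_pio[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28},
	{0x7e40, 0x7e44, 0x040, 10},
};

/* Dump every register described by the table: write the index into the
 * address register, then read the value back through the data register.
 */
static void dump_indirect(const uint32_t (*tbl)[IREG_NUM_ELEM], size_t rows)
{
	for (size_t i = 0; i < rows; i++) {
		uint32_t addr_reg = tbl[i][0], data_reg = tbl[i][1];
		uint32_t idx = tbl[i][2], cnt = tbl[i][3];

		for (uint32_t j = 0; j < cnt; j++) {
			mmio_write32(addr_reg, idx + j);
			printf("0x%03x: 0x%08x\n", (unsigned)(idx + j),
			       (unsigned)mmio_read32(data_reg));
		}
	}
}

int main(void)
{
	dump_indirect(tp_pio, sizeof(tp_pio) / sizeof(tp_pio[0]));
	return 0;
}

Beyond the relocation, the hunks below add a CUDBG_FLASH dump entity, per-region ethtool flashing (FW, PHY FW, boot, boot config), ethtool n-tuple filters, a loopback self-test, and mirror-VI plumbing.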
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index b7f43254598f..f6f3ef9a93cf 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -26,7 +26,7 @@ config CHELSIO_T1 This driver supports Chelsio gigabit and 10-gigabit Ethernet cards. More information about adapter features and performance tuning is in - <file:Documentation/networking/device_drivers/chelsio/cxgb.rst>. + <file:Documentation/networking/device_drivers/ethernet/chelsio/cxgb.rst>. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index dcab94cc2dee..876f90e5795e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -350,167 +350,6 @@ struct cudbg_qdesc_info { #define IREG_NUM_ELEM 4 -static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { - {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ - {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ - {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */ - {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */ - {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */ - {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */ - {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */ - {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */ - {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */ - {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */ - {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */ - {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */ -}; - -static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = { - {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */ - {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */ - {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */ - {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */ - {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */ - {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */ - {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */ - {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */ - {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */ - {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */ - {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */ -}; - -static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = { - {0x7e18, 0x7e1c, 0x0, 12} -}; - -static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = { - {0x7e18, 0x7e1c, 0x0, 12} -}; - -static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = { - {0x7e50, 0x7e54, 0x0, 13}, - {0x7e50, 0x7e54, 0x10, 6}, - {0x7e50, 0x7e54, 0x18, 21}, - {0x7e50, 0x7e54, 0x30, 32}, - {0x7e50, 0x7e54, 0x50, 22}, - {0x7e50, 0x7e54, 0x68, 12} -}; - -static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { - {0x7e50, 0x7e54, 0x0, 13}, - {0x7e50, 0x7e54, 0x10, 6}, - {0x7e50, 0x7e54, 0x18, 8}, - {0x7e50, 0x7e54, 0x20, 13}, - {0x7e50, 0x7e54, 0x30, 16}, - {0x7e50, 0x7e54, 0x40, 16}, - {0x7e50, 0x7e54, 0x50, 16}, - {0x7e50, 0x7e54, 0x60, 6}, - {0x7e50, 0x7e54, 0x68, 4} -}; - -static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { - {0x10cc, 0x10d0, 0x0, 16}, - {0x10cc, 0x10d4, 0x0, 16}, -}; - -static const u32 t6_sge_qbase_index_array[] = { - /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ - 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, -}; - -static const u32 
t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { - {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ - {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ - {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */ -}; - -static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = { - {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */ - {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */ -}; - -static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = { - {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */ - {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */ -}; - -static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { - {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */ - {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ -}; - #define CUDBG_NUM_PCIE_CONFIG_REGS 0x61 -static const u32 t5_pcie_config_array[][2] = { - {0x0, 0x34}, - {0x3c, 0x40}, - {0x50, 0x64}, - {0x70, 0x80}, - {0x94, 0xa0}, - {0xb0, 0xb8}, - {0xd0, 0xd4}, - {0x100, 0x128}, - {0x140, 0x148}, - {0x150, 0x164}, - {0x170, 0x178}, - {0x180, 0x194}, - {0x1a0, 0x1b8}, - {0x1c0, 0x208}, -}; - -static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { - {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ - {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ - {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */ -}; - -static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { - {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */ - {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ -}; - -static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { - {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ - {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ - {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ - {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ - {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ - {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ - {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ - {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ - {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ - {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ - {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ -}; - -static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { - {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ - {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* 
up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ -}; - -static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { - {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */ -}; #endif /* __CUDBG_ENTITY_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index fc3813050f0d..c84719e3ca08 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -70,7 +70,8 @@ enum cudbg_dbg_entity_type { CUDBG_HMA_INDIRECT = 67, CUDBG_HMA = 68, CUDBG_QDESC = 70, - CUDBG_MAX_ENTITY = 71, + CUDBG_FLASH = 71, + CUDBG_MAX_ENTITY = 72, }; struct cudbg_init { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index d8ab8e366818..75474f810249 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -15,6 +15,412 @@ #include "cudbg_lib.h" #include "cudbg_zlib.h" +static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { + {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ + {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ + {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */ + {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */ + {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */ + {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */ + {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */ + {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */ + {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */ + {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */ + {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */ + {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */ +}; + +static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = { + {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */ + {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */ + {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */ + {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */ + {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */ + {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */ + {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */ + {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */ + {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */ + {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */ + {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */ +}; + +static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = { + {0x7e18, 0x7e1c, 0x0, 12} +}; + +static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = { + {0x7e18, 0x7e1c, 0x0, 12} +}; + +static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = { + {0x7e50, 0x7e54, 0x0, 13}, + {0x7e50, 0x7e54, 0x10, 6}, + {0x7e50, 0x7e54, 0x18, 21}, + {0x7e50, 0x7e54, 0x30, 32}, + {0x7e50, 0x7e54, 0x50, 22}, + {0x7e50, 0x7e54, 0x68, 12} +}; + +static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { + {0x7e50, 0x7e54, 0x0, 13}, + {0x7e50, 0x7e54, 0x10, 6}, + {0x7e50, 0x7e54, 0x18, 8}, + {0x7e50, 0x7e54, 0x20, 13}, + {0x7e50, 0x7e54, 0x30, 16}, + {0x7e50, 0x7e54, 0x40, 16}, + {0x7e50, 0x7e54, 0x50, 16}, + {0x7e50, 0x7e54, 0x60, 6}, + {0x7e50, 0x7e54, 0x68, 4} +}; + +static const u32 
t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { + {0x10cc, 0x10d0, 0x0, 16}, + {0x10cc, 0x10d4, 0x0, 16}, +}; + +static const u32 t6_sge_qbase_index_array[] = { + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ + 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, +}; + +static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { + {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ + {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ + {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */ +}; + +static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = { + {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */ + {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */ +}; + +static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = { + {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */ + {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */ +}; + +static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { + {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */ + {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ +}; + +static const u32 t5_pcie_config_array[][2] = { + {0x0, 0x34}, + {0x3c, 0x40}, + {0x50, 0x64}, + {0x70, 0x80}, + {0x94, 0xa0}, + {0xb0, 0xb8}, + {0xd0, 0xd4}, + {0x100, 0x128}, + {0x140, 0x148}, + {0x150, 0x164}, + {0x170, 0x178}, + {0x180, 0x194}, + {0x1a0, 0x1b8}, + {0x1c0, 0x208}, +}; + +static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { + {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ + {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ + {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */ +}; + +static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { + {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */ + {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ +}; + +static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ + {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ + {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ + {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ + {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ + {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ + {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ + {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ + {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ +}; + +static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 
0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ +}; + +static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { + {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */ +}; + +u32 cudbg_get_entity_length(struct adapter *adap, u32 entity) +{ + struct cudbg_tcam tcam_region = { 0 }; + u32 value, n = 0, len = 0; + + switch (entity) { + case CUDBG_REG_DUMP: + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T4: + len = T4_REGMAP_SIZE; + break; + case CHELSIO_T5: + case CHELSIO_T6: + len = T5_REGMAP_SIZE; + break; + default: + break; + } + break; + case CUDBG_DEV_LOG: + len = adap->params.devlog.size; + break; + case CUDBG_CIM_LA: + if (is_t6(adap->params.chip)) { + len = adap->params.cim_la_size / 10 + 1; + len *= 10 * sizeof(u32); + } else { + len = adap->params.cim_la_size / 8; + len *= 8 * sizeof(u32); + } + len += sizeof(u32); /* for reading CIM LA configuration */ + break; + case CUDBG_CIM_MA_LA: + len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); + break; + case CUDBG_CIM_QCFG: + len = sizeof(struct cudbg_cim_qcfg); + break; + case CUDBG_CIM_IBQ_TP0: + case CUDBG_CIM_IBQ_TP1: + case CUDBG_CIM_IBQ_ULP: + case CUDBG_CIM_IBQ_SGE0: + case CUDBG_CIM_IBQ_SGE1: + case CUDBG_CIM_IBQ_NCSI: + len = CIM_IBQ_SIZE * 4 * sizeof(u32); + break; + case CUDBG_CIM_OBQ_ULP0: + len = cudbg_cim_obq_size(adap, 0); + break; + case CUDBG_CIM_OBQ_ULP1: + len = cudbg_cim_obq_size(adap, 1); + break; + case CUDBG_CIM_OBQ_ULP2: + len = cudbg_cim_obq_size(adap, 2); + break; + case CUDBG_CIM_OBQ_ULP3: + len = cudbg_cim_obq_size(adap, 3); + break; + case CUDBG_CIM_OBQ_SGE: + len = cudbg_cim_obq_size(adap, 4); + break; + case CUDBG_CIM_OBQ_NCSI: + len = cudbg_cim_obq_size(adap, 5); + break; + case CUDBG_CIM_OBQ_RXQ0: + len = cudbg_cim_obq_size(adap, 6); + break; + case CUDBG_CIM_OBQ_RXQ1: + len = cudbg_cim_obq_size(adap, 7); + break; + case CUDBG_EDC0: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM0_ENABLE_F) { + value = t4_read_reg(adap, MA_EDRAM0_BAR_A); + len = EDRAM0_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_EDC1: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM1_ENABLE_F) { + value = t4_read_reg(adap, MA_EDRAM1_BAR_A); + len = EDRAM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_MC0: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EXT_MEM0_ENABLE_F) { + value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + len = EXT_MEM0_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_MC1: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EXT_MEM1_ENABLE_F) { + value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + len = EXT_MEM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_RSS: + len = t4_chip_rss_size(adap) * sizeof(u16); + break; + case CUDBG_RSS_VF_CONF: + len = adap->params.arch.vfcount * + sizeof(struct cudbg_rss_vf_conf); + break; + case CUDBG_PATH_MTU: + len = NMTUS * sizeof(u16); + break; + case 
CUDBG_PM_STATS: + len = sizeof(struct cudbg_pm_stats); + break; + case CUDBG_HW_SCHED: + len = sizeof(struct cudbg_hw_sched); + break; + case CUDBG_TP_INDIRECT: + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + n = sizeof(t5_tp_pio_array) + + sizeof(t5_tp_tm_pio_array) + + sizeof(t5_tp_mib_index_array); + break; + case CHELSIO_T6: + n = sizeof(t6_tp_pio_array) + + sizeof(t6_tp_tm_pio_array) + + sizeof(t6_tp_mib_index_array); + break; + default: + break; + } + n = n / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + break; + case CUDBG_SGE_INDIRECT: + len = sizeof(struct ireg_buf) * 2 + + sizeof(struct sge_qbase_reg_field); + break; + case CUDBG_ULPRX_LA: + len = sizeof(struct cudbg_ulprx_la); + break; + case CUDBG_TP_LA: + len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); + break; + case CUDBG_MEMINFO: + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_meminfo); + break; + case CUDBG_CIM_PIF_LA: + len = sizeof(struct cudbg_cim_pif_la); + len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); + break; + case CUDBG_CLK: + len = sizeof(struct cudbg_clk_info); + break; + case CUDBG_PCIE_INDIRECT: + n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + break; + case CUDBG_PM_INDIRECT: + n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + break; + case CUDBG_TID_INFO: + len = sizeof(struct cudbg_tid_info_region_rev1); + break; + case CUDBG_PCIE_CONFIG: + len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; + break; + case CUDBG_DUMP_CONTEXT: + len = cudbg_dump_context_size(adap); + break; + case CUDBG_MPS_TCAM: + len = sizeof(struct cudbg_mps_tcam) * + adap->params.arch.mps_tcam_size; + break; + case CUDBG_VPD_DATA: + len = sizeof(struct cudbg_vpd_data); + break; + case CUDBG_LE_TCAM: + cudbg_fill_le_tcam_info(adap, &tcam_region); + len = sizeof(struct cudbg_tcam) + + sizeof(struct cudbg_tid_data) * tcam_region.max_tid; + break; + case CUDBG_CCTRL: + len = sizeof(u16) * NMTUS * NCCTRL_WIN; + break; + case CUDBG_MA_INDIRECT: + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + n = sizeof(t6_ma_ireg_array) / + (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + } + break; + case CUDBG_ULPTX_LA: + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la); + break; + case CUDBG_UP_CIM_INDIRECT: + n = 0; + if (is_t5(adap->params.chip)) + n = sizeof(t5_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else if (is_t6(adap->params.chip)) + n = sizeof(t6_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + break; + case CUDBG_PBT_TABLE: + len = sizeof(struct cudbg_pbt_tables); + break; + case CUDBG_MBOX_LOG: + len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; + break; + case CUDBG_HMA_INDIRECT: + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + n = sizeof(t6_hma_ireg_array) / + (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + } + break; + case CUDBG_HMA: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & HMA_MUX_F) { + /* In T6, there's no MC1. So, HMA shares MC1 + * address space. 
+ */ + value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + len = EXT_MEM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_QDESC: + cudbg_fill_qdesc_num_and_size(adap, NULL, &len); + break; + default: + break; + } + + return len; +} + static int cudbg_do_compression(struct cudbg_init *pdbg_init, struct cudbg_buffer *pin_buff, struct cudbg_buffer *dbg_buff) @@ -3158,3 +3564,40 @@ out_free: return rc; } + +int cudbg_collect_flash(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + u32 count = padap->params.sf_size, n; + struct cudbg_buffer temp_buff = {0}; + u32 addr, i; + int rc; + + addr = FLASH_EXP_ROM_START; + + for (i = 0; i < count; i += SF_PAGE_SIZE) { + n = min_t(u32, count - i, SF_PAGE_SIZE); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out; + } + rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0); + if (rc) + goto out; + + addr += (n * 4); + rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, + dbg_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out; + } + } + +out: + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 10ee6ed1d932..d6d6cd298930 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -162,7 +162,11 @@ int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_flash(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +u32 cudbg_get_entity_length(struct adapter *adap, u32 entity); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index cf69c6edcfec..9cb8b229c1b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -139,6 +139,64 @@ enum cc_fec { FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ }; +enum { + CXGB4_ETHTOOL_FLASH_FW = 1, + CXGB4_ETHTOOL_FLASH_PHY = 2, + CXGB4_ETHTOOL_FLASH_BOOT = 3, + CXGB4_ETHTOOL_FLASH_BOOTCFG = 4 +}; + +struct cxgb4_bootcfg_data { + __le16 signature; + __u8 reserved[2]; +}; + +struct cxgb4_pcir_data { + __le32 signature; /* Signature. The string "PCIR" */ + __le16 vendor_id; /* Vendor Identification */ + __le16 device_id; /* Device Identification */ + __u8 vital_product[2]; /* Pointer to Vital Product Data */ + __u8 length[2]; /* PCIR Data Structure Length */ + __u8 revision; /* PCIR Data Structure Revision */ + __u8 class_code[3]; /* Class Code */ + __u8 image_length[2]; /* Image Length. Multiple of 512B */ + __u8 code_revision[2]; /* Revision Level of Code/Data */ + __u8 code_type; + __u8 indicator; + __u8 reserved[2]; +}; + +/* BIOS boot headers */ +struct cxgb4_pci_exp_rom_header { + __le16 signature; /* ROM Signature. Should be 0xaa55 */ + __u8 reserved[22]; /* Reserved per processor Architecture data */ + __le16 pcir_offset; /* Offset to PCI Data Structure */ +}; + +/* Legacy PCI Expansion ROM Header */ +struct legacy_pci_rom_hdr { + __u8 signature[2]; /* ROM Signature. 
Should be 0xaa55 */ + __u8 size512; /* Current Image Size in units of 512 bytes */ + __u8 initentry_point[4]; + __u8 cksum; /* Checksum computed on the entire Image */ + __u8 reserved[16]; /* Reserved */ + __le16 pcir_offset; /* Offset to PCI Data Structure */ +}; + +#define CXGB4_HDR_CODE1 0x00 +#define CXGB4_HDR_CODE2 0x03 +#define CXGB4_HDR_INDI 0x80 + +/* BOOT constants */ +enum { + BOOT_CFG_SIG = 0x4243, + BOOT_SIZE_INC = 512, + BOOT_SIGNATURE = 0xaa55, + BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header), + BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC, + PCIR_SIGNATURE = 0x52494350 +}; + struct port_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_frames; /* all good frames */ @@ -474,6 +532,12 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) +struct cxgb4_ethtool_lb_test { + struct completion completion; + int result; + int loopback; +}; + struct fw_info { u8 chip; char *fs_name; @@ -492,6 +556,11 @@ struct trace_params { unsigned char port; }; +struct cxgb4_fw_data { + __be32 signature; + __u8 reserved[4]; +}; + /* Firmware Port Capabilities types. */ typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ @@ -616,6 +685,13 @@ struct port_info { u8 rx_cchan; bool tc_block_shared; + + /* Mirror VI information */ + u16 viid_mirror; + u16 nmirrorqsets; + u32 vi_mirror_count; + struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */ + struct cxgb4_ethtool_lb_test ethtool_lb; }; struct dentry; @@ -642,6 +718,13 @@ enum { ULP_CRYPTO_KTLS_INLINE = 1 << 3, }; +#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024 +#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64 +#define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5 +#define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8 + +#define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72 + struct rx_sw_desc; struct sge_fl { /* SGE free-buffer queue state */ @@ -891,12 +974,15 @@ struct sge { struct sge_eohw_txq *eohw_txq; struct sge_ofld_rxq *eohw_rxq; + struct sge_eth_rxq *mirror_rxq[NCHAN]; + u16 max_ethqsets; /* # of available Ethernet queue sets */ u16 ethqsets; /* # of active Ethernet queue sets */ u16 ethtxq_rover; /* Tx queue to clean up next */ u16 ofldqsets; /* # of active ofld queue sets */ u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 eoqsets; /* # of ETHOFLD queues */ + u16 mirrorqsets; /* # of Mirror queues */ u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; @@ -1003,6 +1089,17 @@ struct mps_entries_ref { refcount_t refcnt; }; +struct cxgb4_ethtool_filter_info { + u32 *loc_array; /* Array holding the actual TIDs set to filters */ + unsigned long *bmap; /* Bitmap for managing filters in use */ + u32 in_use; /* # of filters in use */ +}; + +struct cxgb4_ethtool_filter { + u32 nentries; /* Adapter wide number of supported filters */ + struct cxgb4_ethtool_filter_info *port; /* Per port entry */ +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -1019,9 +1116,7 @@ struct adapter { int msg_enable; __be16 vxlan_port; - u8 vxlan_port_cnt; __be16 geneve_port; - u8 geneve_port_cnt; struct adapter_params params; struct cxgb4_virt_res vres; @@ -1128,6 +1223,9 @@ struct adapter { /* TC MATCHALL classifier offload */ struct cxgb4_tc_matchall *tc_matchall; + + /* Ethtool n-tuple */ + struct cxgb4_ethtool_filter *ethtool_filters; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1340,6 +1438,8 @@ enum { NAT_MODE_ALL /* NAT on entire 4-tuple */ }; +#define 
CXGB4_FILTER_TYPE_MAX 2 + /* Host shadow copy of ingress filter entry. This is in host native format * and doesn't match the ordering or bit order, etc. of the hardware of the * firmware command. The use of bit-field structure elements is purely to @@ -1504,6 +1604,7 @@ void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); +int cxgb4_selftest_lb_pkt(struct net_device *netdev); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); @@ -1736,8 +1837,7 @@ int t4_get_pfres(struct adapter *adapter); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); -int t4_load_phy_fw(struct adapter *adap, - int win, spinlock_t *lock, +int t4_load_phy_fw(struct adapter *adap, int win, int (*phy_fw_version)(const u8 *, size_t), const u8 *phy_fw_data, size_t phy_fw_size); int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver); @@ -1781,6 +1881,8 @@ int t4_init_rss_mode(struct adapter *adap, int mbox); int t4_init_portinfo(struct port_info *pi, int mbox, int port, int pf, int vf, u8 mac[]); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf, + u16 *mirror_viid); void t4_fatal_err(struct adapter *adapter); unsigned int t4_chip_rss_size(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, @@ -1890,8 +1992,8 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid); int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok); + unsigned int viid_mirror, int mtu, int promisc, int all_multi, + int bcast, int vlanex, bool sleep_ok); int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); @@ -1988,6 +2090,10 @@ void t4_register_netevent_notifier(void); int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf); +int t4_load_boot(struct adapter *adap, u8 *boot_data, + unsigned int boot_addr, unsigned int size); +int t4_load_bootcfg(struct adapter *adap, + const u8 *cfg_data, unsigned int size); void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap); @@ -2061,6 +2167,8 @@ int cxgb_open(struct net_device *dev); int cxgb_close(struct net_device *dev); void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q); void cxgb4_quiesce_rx(struct sge_rspq *q); +int cxgb4_port_mirror_alloc(struct net_device *dev); +void cxgb4_port_mirror_free(struct net_device *dev); #ifdef CONFIG_CHELSIO_TLS_DEVICE int cxgb4_set_ktls_feature(struct adapter *adap, bool enable); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index e374b413d9ac..77648e4ab4cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ 
-66,249 +66,9 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect }, }; -static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) -{ - struct cudbg_tcam tcam_region = { 0 }; - u32 value, n = 0, len = 0; - - switch (entity) { - case CUDBG_REG_DUMP: - switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { - case CHELSIO_T4: - len = T4_REGMAP_SIZE; - break; - case CHELSIO_T5: - case CHELSIO_T6: - len = T5_REGMAP_SIZE; - break; - default: - break; - } - break; - case CUDBG_DEV_LOG: - len = adap->params.devlog.size; - break; - case CUDBG_CIM_LA: - if (is_t6(adap->params.chip)) { - len = adap->params.cim_la_size / 10 + 1; - len *= 10 * sizeof(u32); - } else { - len = adap->params.cim_la_size / 8; - len *= 8 * sizeof(u32); - } - len += sizeof(u32); /* for reading CIM LA configuration */ - break; - case CUDBG_CIM_MA_LA: - len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); - break; - case CUDBG_CIM_QCFG: - len = sizeof(struct cudbg_cim_qcfg); - break; - case CUDBG_CIM_IBQ_TP0: - case CUDBG_CIM_IBQ_TP1: - case CUDBG_CIM_IBQ_ULP: - case CUDBG_CIM_IBQ_SGE0: - case CUDBG_CIM_IBQ_SGE1: - case CUDBG_CIM_IBQ_NCSI: - len = CIM_IBQ_SIZE * 4 * sizeof(u32); - break; - case CUDBG_CIM_OBQ_ULP0: - len = cudbg_cim_obq_size(adap, 0); - break; - case CUDBG_CIM_OBQ_ULP1: - len = cudbg_cim_obq_size(adap, 1); - break; - case CUDBG_CIM_OBQ_ULP2: - len = cudbg_cim_obq_size(adap, 2); - break; - case CUDBG_CIM_OBQ_ULP3: - len = cudbg_cim_obq_size(adap, 3); - break; - case CUDBG_CIM_OBQ_SGE: - len = cudbg_cim_obq_size(adap, 4); - break; - case CUDBG_CIM_OBQ_NCSI: - len = cudbg_cim_obq_size(adap, 5); - break; - case CUDBG_CIM_OBQ_RXQ0: - len = cudbg_cim_obq_size(adap, 6); - break; - case CUDBG_CIM_OBQ_RXQ1: - len = cudbg_cim_obq_size(adap, 7); - break; - case CUDBG_EDC0: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EDRAM0_ENABLE_F) { - value = t4_read_reg(adap, MA_EDRAM0_BAR_A); - len = EDRAM0_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_EDC1: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EDRAM1_ENABLE_F) { - value = t4_read_reg(adap, MA_EDRAM1_BAR_A); - len = EDRAM1_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_MC0: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EXT_MEM0_ENABLE_F) { - value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); - len = EXT_MEM0_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_MC1: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EXT_MEM1_ENABLE_F) { - value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); - len = EXT_MEM1_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_RSS: - len = t4_chip_rss_size(adap) * sizeof(u16); - break; - case CUDBG_RSS_VF_CONF: - len = adap->params.arch.vfcount * - sizeof(struct cudbg_rss_vf_conf); - break; - case CUDBG_PATH_MTU: - len = NMTUS * sizeof(u16); - break; - case CUDBG_PM_STATS: - len = sizeof(struct cudbg_pm_stats); - break; - case CUDBG_HW_SCHED: - len = sizeof(struct cudbg_hw_sched); - break; - case CUDBG_TP_INDIRECT: - switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { - case CHELSIO_T5: - n = sizeof(t5_tp_pio_array) + - sizeof(t5_tp_tm_pio_array) + - sizeof(t5_tp_mib_index_array); - break; - case CHELSIO_T6: - n = sizeof(t6_tp_pio_array) + - sizeof(t6_tp_tm_pio_array) + - sizeof(t6_tp_mib_index_array); - break; - default: - break; - } - n = n / (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct 
ireg_buf) * n; - break; - case CUDBG_SGE_INDIRECT: - len = sizeof(struct ireg_buf) * 2 + - sizeof(struct sge_qbase_reg_field); - break; - case CUDBG_ULPRX_LA: - len = sizeof(struct cudbg_ulprx_la); - break; - case CUDBG_TP_LA: - len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); - break; - case CUDBG_MEMINFO: - len = sizeof(struct cudbg_ver_hdr) + - sizeof(struct cudbg_meminfo); - break; - case CUDBG_CIM_PIF_LA: - len = sizeof(struct cudbg_cim_pif_la); - len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); - break; - case CUDBG_CLK: - len = sizeof(struct cudbg_clk_info); - break; - case CUDBG_PCIE_INDIRECT: - n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n * 2; - break; - case CUDBG_PM_INDIRECT: - n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n * 2; - break; - case CUDBG_TID_INFO: - len = sizeof(struct cudbg_tid_info_region_rev1); - break; - case CUDBG_PCIE_CONFIG: - len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; - break; - case CUDBG_DUMP_CONTEXT: - len = cudbg_dump_context_size(adap); - break; - case CUDBG_MPS_TCAM: - len = sizeof(struct cudbg_mps_tcam) * - adap->params.arch.mps_tcam_size; - break; - case CUDBG_VPD_DATA: - len = sizeof(struct cudbg_vpd_data); - break; - case CUDBG_LE_TCAM: - cudbg_fill_le_tcam_info(adap, &tcam_region); - len = sizeof(struct cudbg_tcam) + - sizeof(struct cudbg_tid_data) * tcam_region.max_tid; - break; - case CUDBG_CCTRL: - len = sizeof(u16) * NMTUS * NCCTRL_WIN; - break; - case CUDBG_MA_INDIRECT: - if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { - n = sizeof(t6_ma_ireg_array) / - (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n * 2; - } - break; - case CUDBG_ULPTX_LA: - len = sizeof(struct cudbg_ver_hdr) + - sizeof(struct cudbg_ulptx_la); - break; - case CUDBG_UP_CIM_INDIRECT: - n = 0; - if (is_t5(adap->params.chip)) - n = sizeof(t5_up_cim_reg_array) / - ((IREG_NUM_ELEM + 1) * sizeof(u32)); - else if (is_t6(adap->params.chip)) - n = sizeof(t6_up_cim_reg_array) / - ((IREG_NUM_ELEM + 1) * sizeof(u32)); - len = sizeof(struct ireg_buf) * n; - break; - case CUDBG_PBT_TABLE: - len = sizeof(struct cudbg_pbt_tables); - break; - case CUDBG_MBOX_LOG: - len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; - break; - case CUDBG_HMA_INDIRECT: - if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { - n = sizeof(t6_hma_ireg_array) / - (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n; - } - break; - case CUDBG_HMA: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & HMA_MUX_F) { - /* In T6, there's no MC1. So, HMA shares MC1 - * address space. 
- */ - value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); - len = EXT_MEM1_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_QDESC: - cudbg_fill_qdesc_num_and_size(adap, NULL, &len); - break; - default: - break; - } - - return len; -} +static const struct cxgb4_collect_entity cxgb4_collect_flash_dump[] = { + { CUDBG_FLASH, cudbg_collect_flash }, +}; u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) { @@ -319,17 +79,20 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) if (flag & CXGB4_ETH_DUMP_HW) { for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) { entity = cxgb4_collect_hw_dump[i].entity; - len += cxgb4_get_entity_length(adap, entity); + len += cudbg_get_entity_length(adap, entity); } } if (flag & CXGB4_ETH_DUMP_MEM) { for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) { entity = cxgb4_collect_mem_dump[i].entity; - len += cxgb4_get_entity_length(adap, entity); + len += cudbg_get_entity_length(adap, entity); } } + if (flag & CXGB4_ETH_DUMP_FLASH) + len += adap->params.sf_size; + /* If compression is enabled, a smaller destination buffer is enough */ wsize = cudbg_get_workspace_size(); if (wsize && len > CUDBG_DUMP_BUFF_SIZE) @@ -468,6 +231,13 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, buf, &total_size); + if (flag & CXGB4_ETH_DUMP_FLASH) + cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff, + cxgb4_collect_flash_dump, + ARRAY_SIZE(cxgb4_collect_flash_dump), + buf, + &total_size); + cudbg_free_compress_buff(&cudbg_init); cudbg_hdr->data_len = total_size; if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index 66b805c7a92c..c04a49b6378d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -27,6 +27,7 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS { CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE, CXGB4_ETH_DUMP_MEM = (1 << 0), /* On-Chip Memory Dumps */ CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */ + CXGB4_ETH_DUMP_FLASH = (1 << 2), /* Dump flash memory */ }; #define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index b477b8842905..05f33b7e3677 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2742,6 +2742,58 @@ do { \ } r -= eth_entries; + for_each_port(adap, j) { + struct port_info *pi = adap2pinfo(adap, j); + const struct sge_eth_rxq *rx; + + mutex_lock(&pi->vi_mirror_mutex); + if (!pi->vi_mirror_count) { + mutex_unlock(&pi->vi_mirror_mutex); + continue; + } + + if (r >= DIV_ROUND_UP(pi->nmirrorqsets, 4)) { + r -= DIV_ROUND_UP(pi->nmirrorqsets, 4); + mutex_unlock(&pi->vi_mirror_mutex); + continue; + } + + rx = &s->mirror_rxq[j][r * 4]; + n = min(4, pi->nmirrorqsets - 4 * r); + + S("QType:", "Mirror-Rxq"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxCSO:", stats.rx_cso); + RL("VLANxtract:", stats.vlan_ex); + RL("LROmerged:", stats.lro_merged); + RL("LROpackets:", stats.lro_pkts); + RL("RxDrops:", stats.rx_drops); + RL("RxBadPkts:", stats.bad_rx_pkts); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); + RL("FLStarving:", fl.starving); + + mutex_unlock(&pi->vi_mirror_mutex); + goto out; + } + if (!adap->tc_mqprio) goto skip_mqprio; @@ -3098,9 +3150,10 @@ unlock: return 0; } -static int sge_queue_entries(const struct adapter *adap) +static int sge_queue_entries(struct adapter *adap) { int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0; + int mirror_rxq_entries = 0; if (adap->tc_mqprio) { struct cxgb4_tc_port_mqprio *port_mqprio; @@ -3123,6 +3176,15 @@ static int sge_queue_entries(const struct adapter *adap) mutex_unlock(&adap->tc_mqprio->mqprio_mutex); } + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + mutex_lock(&pi->vi_mirror_mutex); + if (pi->vi_mirror_count) + mirror_rxq_entries += DIV_ROUND_UP(pi->nmirrorqsets, 4); + mutex_unlock(&pi->vi_mirror_mutex); + } + if (!is_uld(adap)) goto lld_only; @@ -3137,7 +3199,7 @@ static int sge_queue_entries(const struct adapter *adap) mutex_unlock(&uld_mutex); lld_only: - return DIV_ROUND_UP(adap->sge.ethqsets, 4) + + return DIV_ROUND_UP(adap->sge.ethqsets, 4) + mirror_rxq_entries + eohw_entries + eosw_entries + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index f27be1132d37..9f3173f86eed 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -10,6 +10,8 @@ #include "t4_regs.h" #include "t4fw_api.h" #include "cxgb4_cudbg.h" +#include "cxgb4_filter.h" +#include "cxgb4_tc_flower.h" #define EEPROM_MAGIC 0x38E2F10C @@ -23,6 +25,23 @@ static void set_msglevel(struct net_device *dev, u32 val) netdev2adap(dev)->msg_enable = val; } +enum cxgb4_ethtool_tests { + CXGB4_ETHTOOL_LB_TEST, + CXGB4_ETHTOOL_MAX_TEST, +}; + +static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = { + "Loop back test (offline)", +}; + +static const char * const flash_region_strings[] = { + "All", + "Firmware", + "PHY Firmware", + "Boot", + "Boot CFG", +}; + static const char stats_strings[][ETH_GSTRING_LEN] = { "tx_octets_ok ", "tx_frames_ok ", @@ -156,6 +175,8 @@ static int get_sset_count(struct net_device *dev, int sset) ARRAY_SIZE(loopback_stats_strings); case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(cxgb4_priv_flags_strings); + case ETH_SS_TEST: + return ARRAY_SIZE(cxgb4_selftest_strings); default: return -EOPNOTSUPP; } @@ -218,6 +239,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) } else if (stringset == ETH_SS_PRIV_FLAGS) { memcpy(data, cxgb4_priv_flags_strings, sizeof(cxgb4_priv_flags_strings)); + } else if (stringset == ETH_SS_TEST) { + 
memcpy(data, cxgb4_selftest_strings, + sizeof(cxgb4_selftest_strings)); } } @@ -1235,15 +1259,211 @@ out: return err; } -static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev, + const u8 *data, u32 size) { + struct adapter *adap = netdev2adap(netdev); int ret; - const struct firmware *fw; + + ret = t4_load_bootcfg(adap, data, size); + if (ret) + dev_err(adap->pdev_dev, "Failed to load boot cfg image\n"); + + return ret; +} + +static int cxgb4_ethtool_flash_boot(struct net_device *netdev, + const u8 *bdata, u32 size) +{ + struct adapter *adap = netdev2adap(netdev); + unsigned int offset; + u8 *data; + int ret; + + data = kmemdup(bdata, size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A))); + + ret = t4_load_boot(adap, data, offset, size); + if (ret) + dev_err(adap->pdev_dev, "Failed to load boot image\n"); + + kfree(data); + return ret; +} + +#define CXGB4_PHY_SIG 0x130000ea + +static int cxgb4_validate_phy_image(const u8 *data, u32 *size) +{ + struct cxgb4_fw_data *header; + + header = (struct cxgb4_fw_data *)data; + if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG) + return -EINVAL; + + return 0; +} + +static int cxgb4_ethtool_flash_phy(struct net_device *netdev, + const u8 *data, u32 size) +{ + struct adapter *adap = netdev2adap(netdev); + int ret; + + ret = cxgb4_validate_phy_image(data, NULL); + if (ret) { + dev_err(adap->pdev_dev, "PHY signature mismatch\n"); + return ret; + } + + spin_lock_bh(&adap->win0_lock); + ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size); + spin_unlock_bh(&adap->win0_lock); + if (ret) + dev_err(adap->pdev_dev, "Failed to load PHY FW\n"); + + return ret; +} + +static int cxgb4_ethtool_flash_fw(struct net_device *netdev, + const u8 *data, u32 size) +{ struct adapter *adap = netdev2adap(netdev); unsigned int mbox = PCIE_FW_MASTER_M + 1; - u32 pcie_fw; + int ret; + + /* If the adapter has been fully initialized then we'll go ahead and + * try to get the firmware's cooperation in upgrading to the new + * firmware image otherwise we'll try to do the entire job from the + * host ... and we always "force" the operation in this path. 
+ */ + if (adap->flags & CXGB4_FULL_INIT_DONE) + mbox = adap->mbox; + + ret = t4_fw_upgrade(adap, mbox, data, size, 1); + if (ret) + dev_err(adap->pdev_dev, + "Failed to flash firmware\n"); + + return ret; +} + +static int cxgb4_ethtool_flash_region(struct net_device *netdev, + const u8 *data, u32 size, u32 region) +{ + struct adapter *adap = netdev2adap(netdev); + int ret; + + switch (region) { + case CXGB4_ETHTOOL_FLASH_FW: + ret = cxgb4_ethtool_flash_fw(netdev, data, size); + break; + case CXGB4_ETHTOOL_FLASH_PHY: + ret = cxgb4_ethtool_flash_phy(netdev, data, size); + break; + case CXGB4_ETHTOOL_FLASH_BOOT: + ret = cxgb4_ethtool_flash_boot(netdev, data, size); + break; + case CXGB4_ETHTOOL_FLASH_BOOTCFG: + ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + if (!ret) + dev_info(adap->pdev_dev, + "loading %s successful, reload cxgb4 driver\n", + flash_region_strings[region]); + return ret; +} + +#define CXGB4_FW_SIG 0x4368656c +#define CXGB4_FW_SIG_OFFSET 0x160 + +static int cxgb4_validate_fw_image(const u8 *data, u32 *size) +{ + struct cxgb4_fw_data *header; + + header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET]; + if (be32_to_cpu(header->signature) != CXGB4_FW_SIG) + return -EINVAL; + + if (size) + *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512; + + return 0; +} + +static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size) +{ + struct cxgb4_bootcfg_data *header; + + header = (struct cxgb4_bootcfg_data *)data; + if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) + return -EINVAL; + + return 0; +} + +static int cxgb4_validate_boot_image(const u8 *data, u32 *size) +{ + struct cxgb4_pci_exp_rom_header *exp_header; + struct cxgb4_pcir_data *pcir_header; + struct legacy_pci_rom_hdr *header; + const u8 *cur_header = data; + u16 pcir_offset; + + exp_header = (struct cxgb4_pci_exp_rom_header *)data; + + if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE) + return -EINVAL; + + if (size) { + do { + header = (struct legacy_pci_rom_hdr *)cur_header; + pcir_offset = le16_to_cpu(header->pcir_offset); + pcir_header = (struct cxgb4_pcir_data *)(cur_header + + pcir_offset); + + *size += header->size512 * 512; + cur_header += header->size512 * 512; + } while (!(pcir_header->indicator & CXGB4_HDR_INDI)); + } + + return 0; +} + +static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size) +{ + if (!cxgb4_validate_fw_image(data, size)) + return CXGB4_ETHTOOL_FLASH_FW; + if (!cxgb4_validate_boot_image(data, size)) + return CXGB4_ETHTOOL_FLASH_BOOT; + if (!cxgb4_validate_phy_image(data, size)) + return CXGB4_ETHTOOL_FLASH_PHY; + if (!cxgb4_validate_bootcfg_image(data, size)) + return CXGB4_ETHTOOL_FLASH_BOOTCFG; + + return -EOPNOTSUPP; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + struct adapter *adap = netdev2adap(netdev); + const struct firmware *fw; unsigned int master; u8 master_vld = 0; + const u8 *fw_data; + size_t fw_size; + u32 size = 0; + u32 pcie_fw; + int region; + int ret; pcie_fw = t4_read_reg(adap, PCIE_FW_A); master = PCIE_FW_MASTER_G(pcie_fw); @@ -1261,19 +1481,32 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) if (ret < 0) return ret; - /* If the adapter has been fully initialized then we'll go ahead and - * try to get the firmware's cooperation in upgrading to the new - * firmware image otherwise we'll try to do the entire job from the - * host ... and we always "force" the operation in this path. 
- */ - if (adap->flags & CXGB4_FULL_INIT_DONE) - mbox = adap->mbox; + fw_data = fw->data; + fw_size = fw->size; + if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) { + while (fw_size > 0) { + size = 0; + region = cxgb4_ethtool_get_flash_region(fw_data, &size); + if (region < 0 || !size) { + ret = region; + goto out_free_fw; + } + + ret = cxgb4_ethtool_flash_region(netdev, fw_data, size, + region); + if (ret) + goto out_free_fw; - ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); + fw_data += size; + fw_size -= size; + } + } else { + ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size, + ef->region); + } + +out_free_fw: release_firmware(fw); - if (!ret) - dev_info(adap->pdev_dev, - "loaded firmware %s, reload cxgb4 driver\n", ef->data); return ret; } @@ -1355,10 +1588,120 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, return -EPERM; } +static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap, + u32 ftid) +{ + struct tid_info *t = &adap->tids; + struct filter_entry *f; + + if (ftid < t->nhpftids) + f = &adap->tids.hpftid_tab[ftid]; + else if (ftid < t->nftids) + f = &adap->tids.ftid_tab[ftid - t->nhpftids]; + else + f = lookup_tid(&adap->tids, ftid); + + return f; +} + +static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs, + struct ch_filter_specification *dfs) +{ + switch (dfs->val.proto) { + case IPPROTO_TCP: + if (dfs->type) + fs->flow_type = TCP_V6_FLOW; + else + fs->flow_type = TCP_V4_FLOW; + break; + case IPPROTO_UDP: + if (dfs->type) + fs->flow_type = UDP_V6_FLOW; + else + fs->flow_type = UDP_V4_FLOW; + break; + } + + if (dfs->type) { + fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport); + fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport); + fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport); + fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport); + memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0], + sizeof(fs->h_u.tcp_ip6_spec.ip6src)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0], + sizeof(fs->m_u.tcp_ip6_spec.ip6src)); + memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0], + sizeof(fs->h_u.tcp_ip6_spec.ip6dst)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0], + sizeof(fs->m_u.tcp_ip6_spec.ip6dst)); + fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos; + fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos; + } else { + fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport); + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport); + fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport); + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport); + memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0], + sizeof(fs->h_u.tcp_ip4_spec.ip4src)); + memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0], + sizeof(fs->m_u.tcp_ip4_spec.ip4src)); + memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0], + sizeof(fs->h_u.tcp_ip4_spec.ip4dst)); + memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0], + sizeof(fs->m_u.tcp_ip4_spec.ip4dst)); + fs->h_u.tcp_ip4_spec.tos = dfs->val.tos; + fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos; + } + fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan); + fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan); + fs->flow_type |= FLOW_EXT; + + if (dfs->action == FILTER_DROP) + fs->ring_cookie = RX_CLS_FLOW_DISC; + else + fs->ring_cookie = dfs->iq; +} + +static int cxgb4_ntuple_get_filter(struct net_device *dev, + struct ethtool_rxnfc *cmd, + unsigned int loc) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = netdev2adap(dev); + struct 
filter_entry *f; + int ftid; + + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) + return -EAGAIN; + + /* Check for maximum filter range */ + if (!adap->ethtool_filters) + return -EOPNOTSUPP; + + if (loc >= adap->ethtool_filters->nentries) + return -ERANGE; + + if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap)) + return -ENOENT; + + ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc]; + + /* Fetch filter_entry */ + f = cxgb4_get_filter_entry(adap, ftid); + + cxgb4_fill_filter_rule(&cmd->fs, &f->fs); + + return 0; +} + static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = netdev2adap(dev); + unsigned int count = 0, index = 0; + int ret = 0; switch (info->cmd) { case ETHTOOL_GRXFH: { @@ -1414,10 +1757,144 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = pi->nqsets; return 0; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = + adap->ethtool_filters->port[pi->port_id].in_use; + return 0; + case ETHTOOL_GRXCLSRULE: + return cxgb4_ntuple_get_filter(dev, info, info->fs.location); + case ETHTOOL_GRXCLSRLALL: + info->data = adap->ethtool_filters->nentries; + while (count < info->rule_cnt) { + ret = cxgb4_ntuple_get_filter(dev, info, index); + if (!ret) + rules[count++] = index; + index++; + } + return 0; } + return -EOPNOTSUPP; } +static int cxgb4_ntuple_del_filter(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct cxgb4_ethtool_filter_info *filter_info; + struct adapter *adapter = netdev2adap(dev); + struct port_info *pi = netdev_priv(dev); + struct filter_entry *f; + u32 filter_id; + int ret; + + if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) + return -EAGAIN; /* can still change nfilters */ + + if (!adapter->ethtool_filters) + return -EOPNOTSUPP; + + if (cmd->fs.location >= adapter->ethtool_filters->nentries) { + dev_err(adapter->pdev_dev, + "Location must be < %u", + adapter->ethtool_filters->nentries); + return -ERANGE; + } + + filter_info = &adapter->ethtool_filters->port[pi->port_id]; + + if (!test_bit(cmd->fs.location, filter_info->bmap)) + return -ENOENT; + + filter_id = filter_info->loc_array[cmd->fs.location]; + f = cxgb4_get_filter_entry(adapter, filter_id); + + ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id); + if (ret) + goto err; + + clear_bit(cmd->fs.location, filter_info->bmap); + filter_info->in_use--; + +err: + return ret; +} + +/* Add Ethtool n-tuple filters. 
*/ +static int cxgb4_ntuple_set_filter(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct cxgb4_ethtool_filter_info *filter_info; + struct adapter *adapter = netdev2adap(netdev); + struct port_info *pi = netdev_priv(netdev); + struct ch_filter_specification fs; + struct ethtool_rx_flow_rule *flow; + u32 tid; + int ret; + + if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) + return -EAGAIN; /* can still change nfilters */ + + if (!adapter->ethtool_filters) + return -EOPNOTSUPP; + + if (cmd->fs.location >= adapter->ethtool_filters->nentries) { + dev_err(adapter->pdev_dev, + "Location must be < %u", + adapter->ethtool_filters->nentries); + return -ERANGE; + } + + if (test_bit(cmd->fs.location, + adapter->ethtool_filters->port[pi->port_id].bmap)) + return -EEXIST; + + memset(&fs, 0, sizeof(fs)); + + input.fs = &cmd->fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) { + ret = PTR_ERR(flow); + goto exit; + } + + fs.hitcnts = 1; + + ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location, + NULL, &fs, &tid); + if (ret) + goto free; + + filter_info = &adapter->ethtool_filters->port[pi->port_id]; + + filter_info->loc_array[cmd->fs.location] = tid; + set_bit(cmd->fs.location, filter_info->bmap); + filter_info->in_use++; + +free: + ethtool_rx_flow_rule_destroy(flow); +exit: + return ret; +} + +static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = cxgb4_ntuple_set_filter(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = cxgb4_ntuple_del_filter(dev, cmd); + break; + default: + break; + } + + return ret; +} + static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump) { struct adapter *adapter = netdev2adap(dev); @@ -1593,6 +2070,44 @@ static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags) return 0; } +static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status) +{ + int dev_state = netif_running(netdev); + + if (dev_state) { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + + *lb_status = cxgb4_selftest_lb_pkt(netdev); + + if (dev_state) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } +} + +static void cxgb4_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + + memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST); + + if (!(adap->flags & CXGB4_FULL_INIT_DONE) || + !(adap->flags & CXGB4_FW_OK)) { + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + if (eth_test->flags & ETH_TEST_FL_OFFLINE) + cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]); + + if (data[CXGB4_ETHTOOL_LB_TEST]) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | @@ -1623,9 +2138,11 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_regs_len = get_regs_len, .get_regs = get_regs, .get_rxnfc = get_rxnfc, + .set_rxnfc = set_rxnfc, .get_rxfh_indir_size = get_rss_table_size, .get_rxfh = get_rss_table, .set_rxfh = set_rss_table, + .self_test = cxgb4_self_test, .flash_device = set_flash, .get_ts_info = get_ts_info, .set_dump = set_dump, @@ -1637,6 +2154,87 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_priv_flags = cxgb4_set_priv_flags, }; +void cxgb4_cleanup_ethtool_filters(struct 
adapter *adap) +{ + struct cxgb4_ethtool_filter_info *eth_filter_info; + u8 i; + + if (!adap->ethtool_filters) + return; + + eth_filter_info = adap->ethtool_filters->port; + + if (eth_filter_info) { + for (i = 0; i < adap->params.nports; i++) { + kvfree(eth_filter_info[i].loc_array); + kfree(eth_filter_info[i].bmap); + } + kfree(eth_filter_info); + } + + kfree(adap->ethtool_filters); +} + +int cxgb4_init_ethtool_filters(struct adapter *adap) +{ + struct cxgb4_ethtool_filter_info *eth_filter_info; + struct cxgb4_ethtool_filter *eth_filter; + struct tid_info *tids = &adap->tids; + u32 nentries, i; + int ret; + + eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL); + if (!eth_filter) + return -ENOMEM; + + eth_filter_info = kcalloc(adap->params.nports, + sizeof(*eth_filter_info), + GFP_KERNEL); + if (!eth_filter_info) { + ret = -ENOMEM; + goto free_eth_filter; + } + + eth_filter->port = eth_filter_info; + + nentries = tids->nhpftids + tids->nftids; + if (is_hashfilter(adap)) + nentries += tids->nhash + + (adap->tids.stid_base - adap->tids.tid_base); + eth_filter->nentries = nentries; + + for (i = 0; i < adap->params.nports; i++) { + eth_filter->port[i].loc_array = kvzalloc(array_size(nentries, sizeof(u32)), GFP_KERNEL); + if (!eth_filter->port[i].loc_array) { + ret = -ENOMEM; + goto free_eth_finfo; + } + + eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries), + sizeof(unsigned long), + GFP_KERNEL); + if (!eth_filter->port[i].bmap) { + ret = -ENOMEM; + goto free_eth_finfo; + } + } + + adap->ethtool_filters = eth_filter; + return 0; + +free_eth_finfo: + while (i-- > 0) { + kfree(eth_filter->port[i].bmap); + kvfree(eth_filter->port[i].loc_array); + } + kfree(eth_filter_info); + +free_eth_filter: + kfree(eth_filter); + + return ret; +} + void cxgb4_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &cxgb_ethtool_ops; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index d02d346629b3..650db92cb11c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1159,6 +1159,11 @@ bool is_filter_exact_match(struct adapter *adap, if (!is_hashfilter(adap)) return false; + if ((atomic_read(&adap->tids.hash_tids_in_use) + + atomic_read(&adap->tids.tids_in_use)) >= + (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base))) + return false; + /* Keep tunnel VNI match disabled for hash-filters for now */ if (fs->mask.encap_vld) return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index b0751c0611ec..807a8dafec45 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -53,4 +53,6 @@ void clear_all_filters(struct adapter *adapter); void init_hash_filter(struct adapter *adap); bool is_filter_exact_match(struct adapter *adap, struct ch_filter_specification *fs); +void cxgb4_cleanup_ethtool_filters(struct adapter *adap); +int cxgb4_init_ethtool_filters(struct adapter *adap); #endif /* __CXGB4_FILTER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0329a6b52087..de078a5bf23e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -435,8 +435,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); __dev_mc_sync(dev, cxgb4_mac_sync,
cxgb4_mac_unsync); - return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, - (dev->flags & IFF_PROMISC) ? 1 : 0, + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror, + mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, sleep_ok); } @@ -503,15 +503,16 @@ set_hash: */ static int link_start(struct net_device *dev) { - int ret; struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->pf; + unsigned int mb = pi->adapter->mbox; + int ret; /* * We do not set address filters and promiscuity here, the stack does * that step explicitly. */ - ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, + ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror, + dev->mtu, -1, -1, -1, !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); if (ret == 0) ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, @@ -822,6 +823,31 @@ static void adap_config_hpfilter(struct adapter *adapter) "HP filter region isn't supported by FW\n"); } +static int cxgb4_config_rss(const struct port_info *pi, u16 *rss, + u16 rss_size, u16 viid) +{ + struct adapter *adap = pi->adapter; + int ret; + + ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, + rss_size); + if (ret) + return ret; + + /* If Tunnel All Lookup isn't specified in the global RSS + * Configuration, then we need to specify a default Ingress + * Queue for any ingress packets which aren't hashed. We'll + * use our first ingress queue ... + */ + return t4_config_vi_rss(adap, adap->mbox, viid, + FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F | + FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F | + FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F | + FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F | + FW_RSS_VI_CONFIG_CMD_UDPEN_F, + rss[0]); +} + /** * cxgb4_write_rss - write the RSS table for a given port * @pi: the port @@ -833,10 +859,10 @@ static void adap_config_hpfilter(struct adapter *adapter) */ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) { - u16 *rss; - int i, err; struct adapter *adapter = pi->adapter; const struct sge_eth_rxq *rxq; + int i, err; + u16 *rss; rxq = &adapter->sge.ethrxq[pi->first_qset]; rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL); @@ -847,21 +873,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) for (i = 0; i < pi->rss_size; i++, queues++) rss[i] = rxq[*queues].rspq.abs_id; - err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, - pi->rss_size, rss, pi->rss_size); - /* If Tunnel All Lookup isn't specified in the global RSS - * Configuration, then we need to specify a default Ingress - * Queue for any ingress packets which aren't hashed. We'll - * use our first ingress queue ... 
- */ - if (!err) - err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, - FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F | - FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F | - FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F | - FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F | - FW_RSS_VI_CONFIG_CMD_UDPEN_F, - rss[0]); + err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid); kfree(rss); return err; } @@ -1259,15 +1271,15 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, static int cxgb_set_features(struct net_device *dev, netdev_features_t features) { - const struct port_info *pi = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; + const struct port_info *pi = netdev_priv(dev); int err; if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) return 0; - err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1, - -1, -1, -1, + err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, + pi->viid_mirror, -1, -1, -1, -1, !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); if (unlikely(err)) dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; @@ -1285,6 +1297,292 @@ static int setup_debugfs(struct adapter *adap) return 0; } +static void cxgb4_port_mirror_free_rxq(struct adapter *adap, + struct sge_eth_rxq *mirror_rxq) +{ + if ((adap->flags & CXGB4_FULL_INIT_DONE) && + !(adap->flags & CXGB4_SHUTTING_DOWN)) + cxgb4_quiesce_rx(&mirror_rxq->rspq); + + if (adap->flags & CXGB4_USING_MSIX) { + cxgb4_clear_msix_aff(mirror_rxq->msix->vec, + mirror_rxq->msix->aff_mask); + free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq); + cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); + } + + free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); +} + +static int cxgb4_port_mirror_alloc_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge_eth_rxq *mirror_rxq; + struct sge *s = &adap->sge; + int ret = 0, msix = 0; + u16 i, rxqid; + u16 *rss; + + if (!pi->vi_mirror_count) + return 0; + + if (s->mirror_rxq[pi->port_id]) + return 0; + + mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL); + if (!mirror_rxq) + return -ENOMEM; + + s->mirror_rxq[pi->port_id] = mirror_rxq; + + if (!(adap->flags & CXGB4_USING_MSIX)) + msix = -((int)adap->sge.intrq.abs_id + 1); + + for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) { + mirror_rxq = &s->mirror_rxq[pi->port_id][i]; + + /* Allocate Mirror Rxqs */ + if (msix >= 0) { + msix = cxgb4_get_msix_idx_from_bmap(adap); + if (msix < 0) { + ret = msix; + goto out_free_queues; + } + + mirror_rxq->msix = &adap->msix_info[msix]; + snprintf(mirror_rxq->msix->desc, + sizeof(mirror_rxq->msix->desc), + "%s-mirrorrxq%d", dev->name, i); + } + + init_rspq(adap, &mirror_rxq->rspq, + CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC, + CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT, + CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM, + CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE); + + mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM; + + ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, + dev, msix, &mirror_rxq->fl, + t4_ethrx_handler, NULL, 0); + if (ret) + goto out_free_msix_idx; + + /* Setup MSI-X vectors for Mirror Rxqs */ + if (adap->flags & CXGB4_USING_MSIX) { + ret = request_irq(mirror_rxq->msix->vec, + t4_sge_intr_msix, 0, + mirror_rxq->msix->desc, + &mirror_rxq->rspq); + if (ret) + goto out_free_rxq; + + cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, + &mirror_rxq->msix->aff_mask, i); + } + + /* Start NAPI for Mirror Rxqs */ + cxgb4_enable_rx(adap, &mirror_rxq->rspq); + } + + /* Setup RSS for Mirror Rxqs */ + rss = kcalloc(pi->rss_size, 
sizeof(u16), GFP_KERNEL); + if (!rss) { + ret = -ENOMEM; + goto out_free_queues; + } + + mirror_rxq = &s->mirror_rxq[pi->port_id][0]; + for (i = 0; i < pi->rss_size; i++) + rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id; + + ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror); + kfree(rss); + if (ret) + goto out_free_queues; + + return 0; + +out_free_rxq: + free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); + +out_free_msix_idx: + cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); + +out_free_queues: + while (rxqid-- > 0) + cxgb4_port_mirror_free_rxq(adap, + &s->mirror_rxq[pi->port_id][rxqid]); + + kfree(s->mirror_rxq[pi->port_id]); + s->mirror_rxq[pi->port_id] = NULL; + return ret; +} + +static void cxgb4_port_mirror_free_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge *s = &adap->sge; + u16 i; + + if (!pi->vi_mirror_count) + return; + + if (!s->mirror_rxq[pi->port_id]) + return; + + for (i = 0; i < pi->nmirrorqsets; i++) + cxgb4_port_mirror_free_rxq(adap, + &s->mirror_rxq[pi->port_id][i]); + + kfree(s->mirror_rxq[pi->port_id]); + s->mirror_rxq[pi->port_id] = NULL; +} + +static int cxgb4_port_mirror_start(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret, idx = -1; + + if (!pi->vi_mirror_count) + return 0; + + /* Mirror VIs can be created dynamically after the stack has + * already set up Rx modes like MTU, promisc, allmulti, etc. + * on the main VI. So, parse what the stack set up on the + * main VI and apply the same settings on the mirror VI. + */ + ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, + dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, + !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); + if (ret) { + dev_err(adap->pdev_dev, + "Failed to start Rx mode for Mirror VI 0x%x, ret: %d\n", + pi->viid_mirror, ret); + return ret; + } + + /* Enable replication bit for the device's MAC address + * in MPS TCAM, so that the packets for the main VI are + * replicated to the mirror VI. + */ + ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx, + dev->dev_addr, true, NULL); + if (ret) { + dev_err(adap->pdev_dev, + "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n", + pi->viid_mirror, ret); + return ret; + } + + /* Enabling a Virtual Interface can result in an interrupt + * during the processing of the VI Enable command and, in some + * paths, result in an attempt to issue another command in the + * interrupt context. Thus, we disable interrupts during the + * course of the VI Enable command ...
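+ *
+ * (The local_bh_disable()/local_bh_enable() pair below masks softirq
+ * processing on this CPU for the duration of the mailbox command,
+ * which is what "disable interrupts" means here.)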
+ */ + local_bh_disable(); + ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, + false); + local_bh_enable(); + if (ret) + dev_err(adap->pdev_dev, + "Failed starting Mirror VI 0x%x, ret: %d\n", + pi->viid_mirror, ret); + + return ret; +} + +static void cxgb4_port_mirror_stop(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + if (!pi->vi_mirror_count) + return; + + t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, + false); +} + +int cxgb4_port_mirror_alloc(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret = 0; + + if (!pi->nmirrorqsets) + return -EOPNOTSUPP; + + mutex_lock(&pi->vi_mirror_mutex); + if (pi->viid_mirror) { + pi->vi_mirror_count++; + goto out_unlock; + } + + ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, + &pi->viid_mirror); + if (ret) + goto out_unlock; + + pi->vi_mirror_count = 1; + + if (adap->flags & CXGB4_FULL_INIT_DONE) { + ret = cxgb4_port_mirror_alloc_queues(dev); + if (ret) + goto out_free_vi; + + ret = cxgb4_port_mirror_start(dev); + if (ret) + goto out_free_queues; + } + + mutex_unlock(&pi->vi_mirror_mutex); + return 0; + +out_free_queues: + cxgb4_port_mirror_free_queues(dev); + +out_free_vi: + pi->vi_mirror_count = 0; + t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); + pi->viid_mirror = 0; + +out_unlock: + mutex_unlock(&pi->vi_mirror_mutex); + return ret; +} + +void cxgb4_port_mirror_free(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + mutex_lock(&pi->vi_mirror_mutex); + if (!pi->viid_mirror) + goto out_unlock; + + if (pi->vi_mirror_count > 1) { + pi->vi_mirror_count--; + goto out_unlock; + } + + cxgb4_port_mirror_stop(dev); + cxgb4_port_mirror_free_queues(dev); + + pi->vi_mirror_count = 0; + t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); + pi->viid_mirror = 0; + +out_unlock: + mutex_unlock(&pi->vi_mirror_mutex); +} + /* * upper-layer driver support */ @@ -2557,8 +2855,29 @@ int cxgb_open(struct net_device *dev) return err; err = link_start(dev); - if (!err) - netif_tx_start_all_queues(dev); + if (err) + return err; + + if (pi->nmirrorqsets) { + mutex_lock(&pi->vi_mirror_mutex); + err = cxgb4_port_mirror_alloc_queues(dev); + if (err) + goto out_unlock; + + err = cxgb4_port_mirror_start(dev); + if (err) + goto out_free_queues; + mutex_unlock(&pi->vi_mirror_mutex); + } + + netif_tx_start_all_queues(dev); + return 0; + +out_free_queues: + cxgb4_port_mirror_free_queues(dev); + +out_unlock: + mutex_unlock(&pi->vi_mirror_mutex); return err; } @@ -2576,7 +2895,17 @@ int cxgb_close(struct net_device *dev) cxgb4_dcb_reset(dev); dcb_tx_queue_prio_enable(dev, false); #endif - return ret; + if (ret) + return ret; + + if (pi->nmirrorqsets) { + mutex_lock(&pi->vi_mirror_mutex); + cxgb4_port_mirror_stop(dev); + cxgb4_port_mirror_free_queues(dev); + mutex_unlock(&pi->vi_mirror_mutex); + } + + return 0; } int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, @@ -2842,11 +3171,11 @@ static void cxgb_set_rxmode(struct net_device *dev) static int cxgb_change_mtu(struct net_device *dev, int new_mtu) { - int ret; struct port_info *pi = netdev_priv(dev); + int ret; - ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, - -1, -1, -1, true); + ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, + pi->viid_mirror, new_mtu, -1, -1, -1, -1, 
true); if (!ret) dev->mtu = new_mtu; return ret; @@ -3403,129 +3732,71 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -static void cxgb_del_udp_tunnel(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int cxgb_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { struct port_info *pi = netdev_priv(netdev); struct adapter *adapter = pi->adapter; - unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; int ret = 0, i; - if (chip_ver < CHELSIO_T6) - return; - switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: - if (!adapter->vxlan_port_cnt || - adapter->vxlan_port != ti->port) - return; /* Invalid VxLAN destination port */ - - adapter->vxlan_port_cnt--; - if (adapter->vxlan_port_cnt) - return; - adapter->vxlan_port = 0; t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0); break; case UDP_TUNNEL_TYPE_GENEVE: - if (!adapter->geneve_port_cnt || - adapter->geneve_port != ti->port) - return; /* Invalid GENEVE destination port */ - - adapter->geneve_port_cnt--; - if (adapter->geneve_port_cnt) - return; - adapter->geneve_port = 0; t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); break; default: - return; + return -EINVAL; } /* Matchall mac entries can be deleted only after all tunnel ports * are brought down or removed. */ if (!adapter->rawf_cnt) - return; + return 0; for_each_port(adapter, i) { pi = adap2pinfo(adapter, i); ret = t4_free_raw_mac_filt(adapter, pi->viid, match_all_mac, match_all_mac, - adapter->rawf_start + - pi->port_id, + adapter->rawf_start + pi->port_id, 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", i); - return; + return ret; } } + + return 0; } -static void cxgb_add_udp_tunnel(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int cxgb_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { struct port_info *pi = netdev_priv(netdev); struct adapter *adapter = pi->adapter; - unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; int i, ret; - if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt) - return; - switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: - /* Callback for adding vxlan port can be called with the same - * port for both IPv4 and IPv6. We should not disable the - * offloading when the same port for both protocols is added - * and later one of them is removed. 
- */ - if (adapter->vxlan_port_cnt && - adapter->vxlan_port == ti->port) { - adapter->vxlan_port_cnt++; - return; - } - - /* We will support only one VxLAN port */ - if (adapter->vxlan_port_cnt) { - netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", - be16_to_cpu(adapter->vxlan_port), - be16_to_cpu(ti->port)); - return; - } - adapter->vxlan_port = ti->port; - adapter->vxlan_port_cnt = 1; - t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); break; case UDP_TUNNEL_TYPE_GENEVE: - if (adapter->geneve_port_cnt && - adapter->geneve_port == ti->port) { - adapter->geneve_port_cnt++; - return; - } - - /* We will support only one GENEVE port */ - if (adapter->geneve_port_cnt) { - netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", - be16_to_cpu(adapter->geneve_port), - be16_to_cpu(ti->port)); - return; - } - adapter->geneve_port = ti->port; - adapter->geneve_port_cnt = 1; - t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); break; default: - return; + return -EINVAL; } /* Create a 'match all' mac filter entry for inner mac, @@ -3540,18 +3811,27 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev, ret = t4_alloc_raw_mac_filt(adapter, pi->viid, match_all_mac, match_all_mac, - adapter->rawf_start + - pi->port_id, + adapter->rawf_start + pi->port_id, 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", be16_to_cpu(ti->port)); - cxgb_del_udp_tunnel(netdev, ti); - return; + return ret; } } + + return 0; } +static const struct udp_tunnel_nic_info cxgb_udp_tunnels = { + .set_port = cxgb_udp_tunnel_set_port, + .unset_port = cxgb_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + static netdev_features_t cxgb_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) @@ -3601,8 +3881,8 @@ static const struct net_device_ops cxgb4_netdev_ops = { #endif /* CONFIG_CHELSIO_T4_FCOE */ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, - .ndo_udp_tunnel_add = cxgb_add_udp_tunnel, - .ndo_udp_tunnel_del = cxgb_del_udp_tunnel, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = cxgb_features_check, .ndo_fix_features = cxgb_fix_features, }; @@ -4147,9 +4427,10 @@ static int adap_init0_phy(struct adapter *adap) /* Load PHY Firmware onto adapter. */ - ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, - phy_info->phy_fw_version, + spin_lock_bh(&adap->win0_lock); + ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, (u8 *)phyf->data, phyf->size); + spin_unlock_bh(&adap->win0_lock); if (ret < 0) dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", -ret); @@ -5503,6 +5784,19 @@ static int cfg_queues(struct adapter *adap) avail_qsets -= s->eoqsets; } + /* Mirror queues must follow the same scheme as normal Ethernet + * Queues when there are enough queues available. Otherwise, + * allocate at least 1 queue per port. If even 1 queue is not + * available, then disable mirror queue support.
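+ *
+ * A worked example with illustrative numbers: on a 4-port adapter
+ * with max_ethqsets = 32, 40 spare qsets give mirrorqsets = 32;
+ * 6 spare qsets give mirrorqsets = 4 (1 per port); fewer than 4
+ * spare qsets give mirrorqsets = 0, i.e. mirroring is disabled.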
+ */ + if (avail_qsets >= s->max_ethqsets) + s->mirrorqsets = s->max_ethqsets; + else if (avail_qsets >= adap->params.nports) + s->mirrorqsets = adap->params.nports; + else + s->mirrorqsets = 0; + avail_qsets -= s->mirrorqsets; + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { struct sge_eth_rxq *r = &s->ethrxq[i]; @@ -5616,8 +5910,8 @@ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, static int enable_msix(struct adapter *adap) { - u32 eth_need, uld_need = 0, ethofld_need = 0; - u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0; + u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0; + u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0; u8 num_uld = 0, nchan = adap->params.nports; u32 i, want, need, num_vec; struct sge *s = &adap->sge; @@ -5648,6 +5942,12 @@ static int enable_msix(struct adapter *adap) need += ethofld_need; } + if (s->mirrorqsets) { + want += s->mirrorqsets; + mirror_need = nchan; + need += mirror_need; + } + want += EXTRA_VECS; need += EXTRA_VECS; @@ -5681,8 +5981,10 @@ static int enable_msix(struct adapter *adap) adap->params.ethofld = 0; s->ofldqsets = 0; s->eoqsets = 0; + s->mirrorqsets = 0; uld_need = 0; ethofld_need = 0; + mirror_need = 0; } num_vec = allocated; @@ -5696,6 +5998,8 @@ static int enable_msix(struct adapter *adap) ofldqsets = nchan; if (is_ethofld(adap)) eoqsets = ethofld_need; + if (s->mirrorqsets) + mirrorqsets = mirror_need; num_vec -= need; while (num_vec) { @@ -5727,12 +6031,25 @@ static int enable_msix(struct adapter *adap) num_vec -= uld_need; } } + + if (s->mirrorqsets) { + while (num_vec) { + if (num_vec < mirror_need || + mirrorqsets > s->mirrorqsets) + break; + + mirrorqsets++; + num_vec -= mirror_need; + } + } } else { ethqsets = s->max_ethqsets; if (is_uld(adap)) ofldqsets = s->ofldqsets; if (is_ethofld(adap)) eoqsets = s->eoqsets; + if (s->mirrorqsets) + mirrorqsets = s->mirrorqsets; } if (ethqsets < s->max_ethqsets) { @@ -5748,6 +6065,15 @@ static int enable_msix(struct adapter *adap) if (is_ethofld(adap)) s->eoqsets = eoqsets; + if (s->mirrorqsets) { + s->mirrorqsets = mirrorqsets; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->nmirrorqsets = s->mirrorqsets / nchan; + mutex_init(&pi->vi_mirror_mutex); + } + } + /* map for msix */ ret = alloc_msix_info(adap, allocated); if (ret) @@ -5759,8 +6085,9 @@ static int enable_msix(struct adapter *adap) } dev_info(adap->pdev_dev, - "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n", - allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld); + "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", + allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, + s->mirrorqsets); kfree(entries); return 0; @@ -5861,6 +6188,7 @@ static void free_some_resources(struct adapter *adapter) cxgb4_cleanup_tc_mqprio(adapter); cxgb4_cleanup_tc_flower(adapter); cxgb4_cleanup_tc_u32(adapter); + cxgb4_cleanup_ethtool_filters(adapter); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); kfree(adapter->sge.starving_fl); @@ -6371,7 +6699,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_TC; + NETIF_F_HW_TC | NETIF_F_NTUPLE; if (chip_ver > CHELSIO_T5) { netdev->hw_enc_features |= NETIF_F_IP_CSUM | @@ -6384,6 +6712,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | 
NETIF_F_HW_TLS_RECORD; + + if (adapter->rawf_cnt) + netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; } if (highdma) @@ -6494,6 +6825,24 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) i); } + if (is_offload(adapter) || is_hashfilter(adapter)) { + if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { + u32 v; + + v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A); + if (chip_ver <= CHELSIO_T5) { + adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); + v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A); + adapter->tids.hash_base = v / 4; + } else { + adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; + v = t4_read_reg(adapter, + T6_LE_DB_HASH_TID_BASE_A); + adapter->tids.hash_base = v; + } + } + } + if (tid_init(&adapter->tids) < 0) { dev_warn(&pdev->dev, "could not allocate TID table, " "continuing\n"); @@ -6515,22 +6864,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (cxgb4_init_tc_matchall(adapter)) dev_warn(&pdev->dev, "could not offload tc matchall, continuing\n"); - } - - if (is_offload(adapter) || is_hashfilter(adapter)) { - if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { - u32 hash_base, hash_reg; - - if (chip_ver <= CHELSIO_T5) { - hash_reg = LE_DB_TID_HASHBASE_A; - hash_base = t4_read_reg(adapter, hash_reg); - adapter->tids.hash_base = hash_base / 4; - } else { - hash_reg = T6_LE_DB_HASH_TID_BASE_A; - hash_base = t4_read_reg(adapter, hash_reg); - adapter->tids.hash_base = hash_base; - } - } + if (cxgb4_init_ethtool_filters(adapter)) + dev_warn(&pdev->dev, + "could not initialize ethtool filters, continuing\n"); } /* See what interrupts we'll be using */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 59b65d4db086..f642c1b475c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -77,10 +77,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, } static void cxgb4_process_flow_match(struct net_device *dev, - struct flow_cls_offload *cls, + struct flow_rule *rule, struct ch_filter_specification *fs) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); u16 addr_type = 0; if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { @@ -88,6 +87,10 @@ static void cxgb4_process_flow_match(struct net_device *dev, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { @@ -220,9 +223,8 @@ static void cxgb4_process_flow_match(struct net_device *dev, } static int cxgb4_validate_flow_match(struct net_device *dev, - struct flow_cls_offload *cls) + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; u16 ethtype_mask = 0; u16 ethtype_key = 0; @@ -383,6 +385,7 @@ void cxgb4_process_flow_actions(struct net_device *in, case FLOW_ACTION_DROP: fs->action = FILTER_DROP; break; + case FLOW_ACTION_MIRRED: case FLOW_ACTION_REDIRECT: { struct net_device *out = act->dev; struct port_info *pi = netdev_priv(out); @@ -426,6 +429,11 @@ void cxgb4_process_flow_actions(struct net_device *in, process_pedit_field(fs, val, mask, offset, htype); } break; + case FLOW_ACTION_QUEUE: + 
fs->action = FILTER_PASS; + fs->dirsteer = 1; + fs->iq = act->queue.index; + break; default: break; } @@ -535,7 +543,8 @@ static bool valid_pedit_action(struct net_device *dev, int cxgb4_validate_flow_actions(struct net_device *dev, struct flow_action *actions, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + u8 matchall_filter) { struct flow_action_entry *act; bool act_redir = false; @@ -552,11 +561,19 @@ int cxgb4_validate_flow_actions(struct net_device *dev, case FLOW_ACTION_DROP: /* Do nothing */ break; + case FLOW_ACTION_MIRRED: case FLOW_ACTION_REDIRECT: { struct adapter *adap = netdev2adap(dev); struct net_device *n_dev, *target_dev; - unsigned int i; bool found = false; + unsigned int i; + + if (act->id == FLOW_ACTION_MIRRED && + !matchall_filter) { + NL_SET_ERR_MSG_MOD(extack, + "Egress mirror action is only supported for tc-matchall"); + return -EOPNOTSUPP; + } target_dev = act->dev; for_each_port(adap, i) { @@ -610,6 +627,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev, act_pedit = true; } break; + case FLOW_ACTION_QUEUE: + /* Do nothing. cxgb4_set_filter will validate */ + break; default: netdev_err(dev, "%s: Unsupported action\n", __func__); return -EOPNOTSUPP; @@ -683,33 +703,22 @@ out_unlock: spin_unlock_bh(&t->ftid_lock); } -int cxgb4_tc_flower_replace(struct net_device *dev, - struct flow_cls_offload *cls) +int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, + u32 tc_prio, struct netlink_ext_ack *extack, + struct ch_filter_specification *fs, u32 *tid) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); - struct netlink_ext_ack *extack = cls->common.extack; struct adapter *adap = netdev2adap(dev); - struct ch_tc_flower_entry *ch_flower; - struct ch_filter_specification *fs; struct filter_ctx ctx; u8 inet_family; int fidx, ret; - if (cxgb4_validate_flow_actions(dev, &rule->action, extack)) + if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0)) return -EOPNOTSUPP; - if (cxgb4_validate_flow_match(dev, cls)) + if (cxgb4_validate_flow_match(dev, rule)) return -EOPNOTSUPP; - ch_flower = allocate_flower_entry(); - if (!ch_flower) { - netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__); - return -ENOMEM; - } - - fs = &ch_flower->fs; - fs->hitcnts = 1; - cxgb4_process_flow_match(dev, cls, fs); + cxgb4_process_flow_match(dev, rule, fs); cxgb4_process_flow_actions(dev, &rule->action, fs); fs->hash = is_filter_exact_match(adap, fs); @@ -720,12 +729,11 @@ int cxgb4_tc_flower_replace(struct net_device *dev, * existing rules. 
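 * (This path now also serves ethtool n-tuple insertion via
 * cxgb4_ntuple_set_filter(), which passes the user-chosen
 * cmd->fs.location as tc_prio.)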
*/ fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash, - cls->common.prio); + tc_prio); if (fidx < 0) { NL_SET_ERR_MSG_MOD(extack, "No free LETCAM index available"); - ret = -ENOMEM; - goto free_entry; + return -ENOMEM; } if (fidx < adap->tids.nhpftids) { @@ -739,42 +747,70 @@ int cxgb4_tc_flower_replace(struct net_device *dev, if (fs->hash) fidx = 0; - fs->tc_prio = cls->common.prio; - fs->tc_cookie = cls->cookie; + fs->tc_prio = tc_prio; init_completion(&ctx.completion); ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); if (ret) { netdev_err(dev, "%s: filter creation err %d\n", __func__, ret); - goto free_entry; + return ret; } /* Wait for reply */ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ); - if (!ret) { - ret = -ETIMEDOUT; - goto free_entry; - } + if (!ret) + return -ETIMEDOUT; - ret = ctx.result; /* Check if hw returned error for filter creation */ + if (ctx.result) + return ctx.result; + + *tid = ctx.tid; + + if (fs->hash) + cxgb4_tc_flower_hash_prio_add(adap, tc_prio); + + return 0; +} + +int cxgb4_tc_flower_replace(struct net_device *dev, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_entry *ch_flower; + struct ch_filter_specification *fs; + int ret; + + ch_flower = allocate_flower_entry(); + if (!ch_flower) { + netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__); + return -ENOMEM; + } + + fs = &ch_flower->fs; + fs->hitcnts = 1; + fs->tc_cookie = cls->cookie; + + ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs, + &ch_flower->filter_id); if (ret) goto free_entry; ch_flower->tc_flower_cookie = cls->cookie; - ch_flower->filter_id = ctx.tid; ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node, adap->flower_ht_params); if (ret) goto del_filter; - if (fs->hash) - cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio); - return 0; del_filter: + if (fs->hash) + cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio); + cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs); free_entry: @@ -782,23 +818,38 @@ free_entry: return ret; } +int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio, + struct ch_filter_specification *fs, int tid) +{ + struct adapter *adap = netdev2adap(dev); + u8 hash; + int ret; + + hash = fs->hash; + + ret = cxgb4_del_filter(dev, tid, fs); + if (ret) + return ret; + + if (hash) + cxgb4_tc_flower_hash_prio_del(adap, tc_prio); + + return ret; +} + int cxgb4_tc_flower_destroy(struct net_device *dev, struct flow_cls_offload *cls) { struct adapter *adap = netdev2adap(dev); struct ch_tc_flower_entry *ch_flower; - u32 tc_prio; - bool hash; int ret; ch_flower = ch_flower_lookup(adap, cls->cookie); if (!ch_flower) return -ENOENT; - hash = ch_flower->fs.hash; - tc_prio = ch_flower->fs.tc_prio; - - ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs); + ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio, + &ch_flower->fs, ch_flower->filter_id); if (ret) goto err; @@ -810,9 +861,6 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, } kfree_rcu(ch_flower, rcu); - if (hash) - cxgb4_tc_flower_hash_prio_del(adap, tc_prio); - err: return ret; } @@ -892,7 +940,7 @@ int cxgb4_tc_flower_stats(struct net_device *dev, if (ofld_stats->prev_packet_count != packets) ofld_stats->last_used = jiffies; flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count, - packets - ofld_stats->packet_count, + packets - ofld_stats->packet_count, 
0, ofld_stats->last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 0a30c96b81ff..6296e1d5a12b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -113,7 +113,8 @@ void cxgb4_process_flow_actions(struct net_device *in, struct ch_filter_specification *fs); int cxgb4_validate_flow_actions(struct net_device *dev, struct flow_action *actions, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + u8 matchall_filter); int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls); @@ -121,6 +122,11 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, struct flow_cls_offload *cls); int cxgb4_tc_flower_stats(struct net_device *dev, struct flow_cls_offload *cls); +int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, + u32 tc_prio, struct netlink_ext_ack *extack, + struct ch_filter_specification *fs, u32 *tid); +int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio, + struct ch_filter_specification *fs, int tid); int cxgb4_init_tc_flower(struct adapter *adap); void cxgb4_cleanup_tc_flower(struct adapter *adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index c88c47a14fbb..2e309f6673f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -188,23 +188,83 @@ static void cxgb4_matchall_free_tc(struct net_device *dev) tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; } -static int cxgb4_matchall_alloc_filter(struct net_device *dev, +static int cxgb4_matchall_mirror_alloc(struct net_device *dev, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; struct cxgb4_tc_port_matchall *tc_port_matchall; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); + struct flow_action_entry *act; + int ret; + u32 i; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + flow_action_for_each(i, act, &cls->rule->action) { + if (act->id == FLOW_ACTION_MIRRED) { + ret = cxgb4_port_mirror_alloc(dev); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Couldn't allocate mirror"); + return ret; + } + + tc_port_matchall->ingress.viid_mirror = pi->viid_mirror; + break; + } + } + + return 0; +} + +static void cxgb4_matchall_mirror_free(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (!tc_port_matchall->ingress.viid_mirror) + return; + + cxgb4_port_mirror_free(dev); + tc_port_matchall->ingress.viid_mirror = 0; +} + +static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type], + &tc_port_matchall->ingress.fs[filter_type]); + if (ret) + return ret; + + tc_port_matchall->ingress.tid[filter_type] = 0; + return 0; +} + +static int cxgb4_matchall_add_filter(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + u8 
filter_type) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); struct ch_filter_specification *fs; int ret, fidx; /* Get a free filter entry TID, where we can insert this new * rule. Only insert rule if its prio doesn't conflict with * existing rules. - * - * 1 slot is enough to create a wildcard matchall VIID rule. */ - fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio); + fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET, + false, cls->common.prio); if (fidx < 0) { NL_SET_ERR_MSG_MOD(extack, "No free LETCAM index available"); @@ -212,13 +272,14 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev, } tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; - fs = &tc_port_matchall->ingress.fs; + fs = &tc_port_matchall->ingress.fs[filter_type]; memset(fs, 0, sizeof(*fs)); if (fidx < adap->tids.nhpftids) fs->prio = 1; fs->tc_prio = cls->common.prio; fs->tc_cookie = cls->cookie; + fs->type = filter_type; fs->hitcnts = 1; fs->val.pfvf_vld = 1; @@ -231,9 +292,39 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev, if (ret) return ret; - tc_port_matchall->ingress.tid = fidx; + tc_port_matchall->ingress.tid[filter_type] = fidx; + return 0; +} + +static int cxgb4_matchall_alloc_filter(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret, i; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + + ret = cxgb4_matchall_mirror_alloc(dev, cls); + if (ret) + return ret; + + for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { + ret = cxgb4_matchall_add_filter(dev, cls, i); + if (ret) + goto out_free; + } + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; + +out_free: + while (i-- > 0) + cxgb4_matchall_del_filter(dev, i); + + cxgb4_matchall_mirror_free(dev); + return ret; } static int cxgb4_matchall_free_filter(struct net_device *dev) @@ -242,18 +333,21 @@ static int cxgb4_matchall_free_filter(struct net_device *dev) struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret; + u8 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; - ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid, - &tc_port_matchall->ingress.fs); - if (ret) - return ret; + for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { + ret = cxgb4_matchall_del_filter(dev, i); + if (ret) + return ret; + } + + cxgb4_matchall_mirror_free(dev); tc_port_matchall->ingress.packets = 0; tc_port_matchall->ingress.bytes = 0; tc_port_matchall->ingress.last_used = 0; - tc_port_matchall->ingress.tid = 0; tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; return 0; } @@ -279,7 +373,7 @@ int cxgb4_tc_matchall_replace(struct net_device *dev, ret = cxgb4_validate_flow_actions(dev, &cls_matchall->rule->action, - extack); + extack, 1); if (ret) return ret; @@ -309,8 +403,12 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev, tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (ingress) { + /* All the filter types of this matchall rule save the + * same cookie. So, checking for the first one is + * enough. 
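+ * For example, the IPv4 (fs[0]) and IPv6 (fs[1]) entries that
+ * cxgb4_matchall_add_filter() created for this rule both carry the
+ * same cls->cookie.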
+ */ if (cls_matchall->cookie != - tc_port_matchall->ingress.fs.tc_cookie) + tc_port_matchall->ingress.fs[0].tc_cookie) return -ENOENT; return cxgb4_matchall_free_filter(dev); @@ -326,27 +424,35 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev, int cxgb4_tc_matchall_stats(struct net_device *dev, struct tc_cls_matchall_offload *cls_matchall) { + u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0; struct cxgb4_tc_port_matchall *tc_port_matchall; + struct cxgb4_matchall_ingress_entry *ingress; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - u64 packets, bytes; int ret; + u8 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED) return -ENOENT; - ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid, - &packets, &bytes, - tc_port_matchall->ingress.fs.hash); - if (ret) - return ret; + ingress = &tc_port_matchall->ingress; + for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { + ret = cxgb4_get_filter_counters(dev, ingress->tid[i], + &tmp_packets, &tmp_bytes, + ingress->fs[i].hash); + if (ret) + return ret; + + packets += tmp_packets; + bytes += tmp_bytes; + } if (tc_port_matchall->ingress.packets != packets) { flow_stats_update(&cls_matchall->stats, bytes - tc_port_matchall->ingress.bytes, packets - tc_port_matchall->ingress.packets, - tc_port_matchall->ingress.last_used, + 0, tc_port_matchall->ingress.last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); tc_port_matchall->ingress.packets = packets; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h index ab6b5683dfd3..fe7ec423a4c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h @@ -19,8 +19,10 @@ struct cxgb4_matchall_egress_entry { struct cxgb4_matchall_ingress_entry { enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ - u32 tid; /* Index to hardware filter entry */ - struct ch_filter_specification fs; /* Filter entry */ + u32 tid[CXGB4_FILTER_TYPE_MAX]; /* Index to hardware filter entries */ + /* Filter entries */ + struct ch_filter_specification fs[CXGB4_FILTER_TYPE_MAX]; + u16 viid_mirror; /* Identifier for allocated Mirror VI */ u64 bytes; /* # of bytes hitting the filter */ u64 packets; /* # of packets hitting the filter */ u64 last_used; /* Last updated jiffies time */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index dbce99b209d6..a963fd0b4540 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -106,6 +106,8 @@ struct tid_info { unsigned long *stid_bmap; unsigned int nstids; unsigned int stid_base; + + unsigned int nhash; unsigned int hash_base; union aopen_entry *atid_tab; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 92eee66cbc84..d2b587d1670a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2432,7 +2432,7 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) struct sk_buff *skb; int ret = 0; - len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams; + len = struct_size(flowc, mnemval, nparams); len16 = DIV_ROUND_UP(len, 16); entry = cxgb4_lookup_eotid(&adap->tids, eotid); @@ -2537,6 +2537,80 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) } } 
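The self-test added below injects a minimal Ethernet frame through the first Tx qset and recognizes its return on the Rx path by source MAC and payload. A sketch of the frame layout, inferred from cxgb4_selftest_lb_pkt() and cxgb4_validate_lb_pkt() below (the struct view is illustrative, not part of the driver):

	struct lb_probe_frame {                         /* hypothetical view */
		u8   dst[ETH_ALEN];                     /* broadcast address */
		u8   src[ETH_ALEN];                     /* netdev->dev_addr */
		char magic[sizeof("CHELSIO_SELFTEST")]; /* CXGB4_SELFTEST_LB_STR */
	} __packed;

On completion, lb->result holds 0 on success or -EIO when the payload check fails; if the frame never comes back, the sender times out after 10 * HZ and returns -ETIMEDOUT.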
+#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST" + +int cxgb4_selftest_lb_pkt(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + struct cxgb4_ethtool_lb_test *lb; + int ret, i = 0, pkt_len, credits; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + u32 ctrl0, ndesc, flits; + struct sge_eth_txq *q; + u8 *sgl; + + pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR); + + flits = DIV_ROUND_UP(pkt_len + sizeof(struct cpl_tx_pkt) + + sizeof(*wr), sizeof(__be64)); + ndesc = flits_to_desc(flits); + + lb = &pi->ethtool_lb; + lb->loopback = 1; + + q = &adap->sge.ethtxq[pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, -1, true); + credits = txq_avail(&q->q) - ndesc; + if (unlikely(credits < 0)) + return -ENOMEM; + + wr = (void *)&q->q.desc[q->q.pidx]; + memset(wr, 0, sizeof(struct tx_desc)); + + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(pkt_len + + sizeof(*cpl))); + wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); + wr->r3 = cpu_to_be64(0); + + cpl = (void *)(wr + 1); + sgl = (u8 *)(cpl + 1); + + ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | + TXPKT_INTF_V(pi->tx_chan + 4); + + cpl->ctrl0 = htonl(ctrl0); + cpl->pack = htons(0); + cpl->len = htons(pkt_len); + cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); + + eth_broadcast_addr(sgl); + i += ETH_ALEN; + ether_addr_copy(&sgl[i], netdev->dev_addr); + i += ETH_ALEN; + + snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s", + CXGB4_SELFTEST_LB_STR); + + init_completion(&lb->completion); + txq_advance(&q->q, ndesc); + cxgb4_ring_tx_db(adap, &q->q, ndesc); + + /* wait for the pkt to return */ + ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); + if (!ret) + ret = -ETIMEDOUT; + else + ret = lb->result; + + lb->loopback = 0; + + return ret; +} + /** * ctrl_xmit - send a packet through an SGE control Tx queue * @q: the control queue @@ -3414,6 +3488,31 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq, t4_sge_eth_txq_egress_update(adapter, txq, -1); } +static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si) +{ + struct adapter *adap = pi->adapter; + struct cxgb4_ethtool_lb_test *lb; + struct sge *s = &adap->sge; + struct net_device *netdev; + u8 *data; + int i; + + netdev = adap->port[pi->port_id]; + lb = &pi->ethtool_lb; + data = si->va + s->pktshift; + + i = ETH_ALEN; + if (!ether_addr_equal(data + i, netdev->dev_addr)) + return -1; + + i += ETH_ALEN; + if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR)) + lb->result = -EIO; + + complete(&lb->completion); + return 0; +} + /** * t4_ethrx_handler - process an ingress ethernet packet * @q: the response queue that received the packet @@ -3437,6 +3536,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct port_info *pi; int ret = 0; + pi = netdev_priv(q->netdev); /* If we're looking at TX Queue CIDX Update, handle that separately * and return. 
*/ @@ -3464,6 +3564,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, if (err_vec) rxq->stats.bad_rx_pkts++; + if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { + ret = cxgb4_validate_lb_pkt(pi, si); + if (!ret) + return 0; + } + if (((pkt->l2info & htonl(RXF_TCP_F)) || tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { @@ -3477,7 +3583,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, rxq->stats.rx_drops++; return 0; } - pi = netdev_priv(q->netdev); /* Handle PTP Event Rx packet */ if (unlikely(pi->ptp_enable)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c index cbe72ed27b1e..e617e4aabbcc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.c +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -55,7 +55,7 @@ struct smt_data *t4_init_smt(void) for (i = 0; i < s->smt_size; ++i) { s->smtab[i].idx = i; s->smtab[i].state = SMT_STATE_UNUSED; - memset(&s->smtab[i].src_mac, 0, ETH_ALEN); + eth_zero_addr(s->smtab[i].src_mac); spin_lock_init(&s->smtab[i].lock); s->smtab[i].refcnt = 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index ad522f822cc2..8a56491bb034 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3752,7 +3752,6 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * t4_load_phy_fw - download port PHY firmware * @adap: the adapter * @win: the PCI-E Memory Window index to use for t4_memory_rw() - * @win_lock: the lock to use to guard the memory copy * @phy_fw_version: function to check PHY firmware versions * @phy_fw_data: the PHY firmware image to write * @phy_fw_size: image size @@ -3761,9 +3760,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * @phy_fw_version is supplied, then it will be used to determine if * it's necessary to perform the transfer by comparing the version * of any existing adapter PHY firmware with that of the passed in - * PHY firmware image. If @win_lock is non-NULL then it will be used - * around the call to t4_memory_rw() which transfers the PHY firmware - * to the adapter. + * PHY firmware image. * * A negative error number will be returned if an error occurs. If * version number support is available and there's no need to upgrade @@ -3775,14 +3772,13 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * contents. Thus, loading PHY firmware on such adapters must happen * after any FW_RESET_CMDs ... */ -int t4_load_phy_fw(struct adapter *adap, - int win, spinlock_t *win_lock, +int t4_load_phy_fw(struct adapter *adap, int win, int (*phy_fw_version)(const u8 *, size_t), const u8 *phy_fw_data, size_t phy_fw_size) { + int cur_phy_fw_ver = 0, new_phy_fw_vers = 0; unsigned long mtype = 0, maddr = 0; u32 param, val; - int cur_phy_fw_ver = 0, new_phy_fw_vers = 0; int ret; /* If we have version number support, then check to see if the adapter @@ -3822,13 +3818,9 @@ int t4_load_phy_fw(struct adapter *adap, /* Copy the supplied PHY Firmware image to the adapter memory location * allocated by the adapter firmware. 
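 * Serializing access to the memory window is now the caller's
 * responsibility; e.g. adap_init0_phy() takes adap->win0_lock around
 * this call (see the cxgb4_main.c hunk above).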
*/ - if (win_lock) - spin_lock_bh(win_lock); ret = t4_memory_rw(adap, win, mtype, maddr, phy_fw_size, (__be32 *)phy_fw_data, T4_MEMORY_WRITE); - if (win_lock) - spin_unlock_bh(win_lock); if (ret) return ret; @@ -7719,6 +7711,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id + * @viid_mirror: the mirror VI id * @mtu: the new MTU or -1 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change @@ -7729,10 +7722,11 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, * Sets Rx properties of a virtual interface. */ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok) + unsigned int viid_mirror, int mtu, int promisc, int all_multi, + int bcast, int vlanex, bool sleep_ok) { - struct fw_vi_rxmode_cmd c; + struct fw_vi_rxmode_cmd c, c_mirror; + int ret; /* convert to FW values */ if (mtu < 0) @@ -7757,7 +7751,24 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); - return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); + + if (viid_mirror) { + memcpy(&c_mirror, &c, sizeof(c_mirror)); + c_mirror.op_to_viid = + cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_VI_RXMODE_CMD_VIID_V(viid_mirror)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); + if (ret) + return ret; + + if (viid_mirror) + ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror), + NULL, sleep_ok); + + return ret; } /** @@ -9719,6 +9730,22 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) return 0; } +int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf, + u16 *mirror_viid) +{ + int ret; + + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL, + NULL, NULL); + if (ret < 0) + return ret; + + if (mirror_viid) + *mirror_viid = ret; + + return 0; +} + /** * t4_read_cimq_cfg - read CIM queue configuration * @adap: the adapter @@ -10481,3 +10508,280 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL); } + +/** + * modify_device_id - Modifies the device ID of the Boot BIOS image + * @device_id: the device ID to write. + * @boot_data: the boot image to modify. + * + * Write the supplied device ID to the boot BIOS image. + */ +static void modify_device_id(int device_id, u8 *boot_data) +{ + struct cxgb4_pcir_data *pcir_header; + struct legacy_pci_rom_hdr *header; + u8 *cur_header = boot_data; + u16 pcir_offset; + + /* Loop through all chained images and change the device IDs */ + do { + header = (struct legacy_pci_rom_hdr *)cur_header; + pcir_offset = le16_to_cpu(header->pcir_offset); + pcir_header = (struct cxgb4_pcir_data *)(cur_header + + pcir_offset); + + /** + * Only modify the Device ID if code type is Legacy or HP. + * 0x00: Okay to modify + * 0x01: FCODE. Do not modify + * 0x03: Okay to modify + * 0x04-0xFF: Do not modify + */ + if (pcir_header->code_type == CXGB4_HDR_CODE1) { + u8 csum = 0; + int i; + + /** + * Modify Device ID to match current adapter + */ + pcir_header->device_id = cpu_to_le16(device_id); + + /** + * Set checksum temporarily to 0. + * We will recalculate it later. + */ + header->cksum = 0x0; + + /** + * Calculate and update checksum + */ + for (i = 0; i < (header->size512 * 512); i++) + csum += cur_header[i]; + + /** + * Invert summed value to create the checksum + * Writing new checksum value directly to the boot data + */ + cur_header[7] = -csum; + + } else if (pcir_header->code_type == CXGB4_HDR_CODE2) { + /** + * Modify Device ID to match current adapter + */ + pcir_header->device_id = cpu_to_le16(device_id); + } + + /** + * Move header pointer up to the next image in the ROM. + */ + cur_header += header->size512 * 512; + } while (!(pcir_header->indicator & CXGB4_HDR_INDI)); } + +/** + * t4_load_boot - download boot flash + * @adap: the adapter + * @boot_data: the boot image to write + * @boot_addr: offset in flash to write boot_data + * @size: image size + * + * Write the supplied boot image to the card's serial flash. + * The boot image has the following sections: a 28-byte header and the + * boot image. + */ +int t4_load_boot(struct adapter *adap, u8 *boot_data, + unsigned int boot_addr, unsigned int size) +{ + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + unsigned int boot_sector = (boot_addr * 1024); + struct cxgb4_pci_exp_rom_header *header; + struct cxgb4_pcir_data *pcir_header; + int pcir_offset; + unsigned int i; + u16 device_id; + int ret, addr; + + /** + * Make sure the boot image does not encroach on the firmware region + */ + if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { + dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n"); + return -EFBIG; + } + + /* Get boot header */ + header = (struct cxgb4_pci_exp_rom_header *)boot_data; + pcir_offset = le16_to_cpu(header->pcir_offset); + /* PCIR Data Structure */ + pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset]; + + /** + * Perform some primitive sanity testing to avoid accidentally + * writing garbage over the boot sectors. We ought to check for + * more but it's not worth it for now ... + */ + if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { + dev_err(adap->pdev_dev, "boot image too small/large\n"); + return -EFBIG; + } + + if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) { + dev_err(adap->pdev_dev, "Boot image missing signature\n"); + return -EINVAL; + } + + /* Check PCI header signature */ + if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) { + dev_err(adap->pdev_dev, "PCI header missing signature\n"); + return -EINVAL; + } + + /* Check Vendor ID matches Chelsio ID */ + if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) { + dev_err(adap->pdev_dev, "Vendor ID missing signature\n"); + return -EINVAL; + } + + /** + * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot, + * and Boot configuration data sections. These 3 boot sections span + * sectors 0 to 7 in flash and live right before the FW image location. + */ + i = DIV_ROUND_UP(size ?
+	/*
+	 * The boot sector comprises the Expansion-ROM boot, iSCSI boot,
+	 * and Boot configuration data sections.  These 3 boot sections span
+	 * sectors 0 to 7 in flash and live right before the FW image
+	 * location.
+	 */
+	i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size);
+	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
+				     (boot_sector >> 16) + i - 1);
+
+	/*
+	 * If size == 0 then we're simply erasing the FLASH sectors associated
+	 * with the on-adapter option ROM file.
+	 */
+	if (ret || size == 0)
+		goto out;
+
+	/* Retrieve the adapter's device ID */
+	pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
+	/* We want to deal with PF 0, so strip off the PF 4 indicator. */
+	device_id = device_id & 0xf0ff;
+
+	/* Check the PCIE Device ID */
+	if (le16_to_cpu(pcir_header->device_id) != device_id) {
+		/*
+		 * Change the device ID in the Boot BIOS image to match
+		 * the Device ID of the current adapter.
+		 */
+		modify_device_id(device_id, boot_data);
+	}
+
+	/*
+	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
+	 * we finish copying the rest of the boot image.  This will ensure
+	 * that the BIOS boot header will only be written if the boot image
+	 * was written in full.
+	 */
+	addr = boot_sector;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		boot_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+		if (ret)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
+			     (const u8 *)header);
+
+out:
+	if (ret)
+		dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
+			ret);
+	return ret;
+}
+
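Note the write ordering inside t4_load_boot(): every page after the first SF_PAGE_SIZE bytes is flashed first, and the page containing the boot header (and thus the signature the option-ROM scan looks for) goes last, so an interrupted download never leaves a signed-but-truncated image in flash. A self-contained sketch of that pattern against a simulated flash; PAGE and the helpers are illustrative, and it assumes the image length is a multiple of PAGE, as t4_load_boot effectively does:

#include <stdint.h>
#include <string.h>

#define PAGE 256
static uint8_t flash[64 * 1024];	/* simulated flash device */

/* Stand-in for a real page-write primitive such as t4_write_flash(). */
static int flash_write(unsigned int addr, size_t len, const uint8_t *buf)
{
	memcpy(flash + addr, buf, len);
	return 0;
}

/* Body pages first, header page last: a torn write is never bootable. */
static int commit_image(unsigned int base, const uint8_t *img, size_t len)
{
	size_t ofs;
	int ret;

	for (ofs = PAGE; ofs < len; ofs += PAGE) {
		ret = flash_write(base + ofs, PAGE, img + ofs);
		if (ret)
			return ret;
	}
	return flash_write(base, PAGE, img);	/* header committed last */
}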
+/**
+ * t4_flash_bootcfg_addr - return the address of the flash
+ * OptionROM configuration
+ * @adapter: the adapter
+ *
+ * Return the address within the flash where the OptionROM Configuration
+ * is stored, or an error if the device FLASH is too small to contain
+ * an OptionROM Configuration.
+ */
+static int t4_flash_bootcfg_addr(struct adapter *adapter)
+{
+	/*
+	 * If the device FLASH isn't large enough to hold an OptionROM
+	 * Configuration File, return an error.
+	 */
+	if (adapter->params.sf_size <
+	    FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+		return -ENOSPC;
+
+	return FLASH_BOOTCFG_START;
+}
+
+int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+	struct cxgb4_bootcfg_data *header;
+	unsigned int flash_cfg_start_sec;
+	unsigned int addr, npad;
+	int ret, i, n, cfg_addr;
+
+	cfg_addr = t4_flash_bootcfg_addr(adap);
+	if (cfg_addr < 0)
+		return cfg_addr;
+
+	addr = cfg_addr;
+	flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+	if (size > FLASH_BOOTCFG_MAX_SIZE) {
+		dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
+			FLASH_BOOTCFG_MAX_SIZE);
+		return -EFBIG;
+	}
+
+	header = (struct cxgb4_bootcfg_data *)cfg_data;
+	if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
+		dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE, sf_sec_size);
+	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+				     flash_cfg_start_sec + i - 1);
+
+	/*
+	 * If size == 0 then we're simply erasing the FLASH sectors associated
+	 * with the on-adapter OptionROM Configuration File.
+	 */
+	if (ret || size == 0)
+		goto out;
+
+	/* This will write to the flash up to SF_PAGE_SIZE at a time. */
+	for (i = 0; i < size; i += SF_PAGE_SIZE) {
+		n = min_t(u32, size - i, SF_PAGE_SIZE);
+
+		ret = t4_write_flash(adap, addr, n, cfg_data);
+		if (ret)
+			goto out;
+
+		addr += SF_PAGE_SIZE;
+		cfg_data += SF_PAGE_SIZE;
+	}
+
+	/* Pad the written data out to a 4-byte boundary. */
+	npad = ((size + 4 - 1) & ~3) - size;
+	for (i = 0; i < npad; i++) {
+		u8 data = 0;
+
+		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+		if (ret)
+			goto out;
+	}
+
+out:
+	if (ret)
+		dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
+			(size == 0 ? "clear" : "download"), ret);
+	return ret;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4a9fcd6c226c..065c01c654ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -563,6 +563,12 @@
 #define AIVEC_V(x)	((x) << AIVEC_S)

 #define PCIE_PF_CLI_A	0x44
+
+#define PCIE_PF_EXPROM_OFST_A	0x4c
+#define OFFSET_S	10
+#define OFFSET_M	0x3fffU
+#define OFFSET_G(x)	(((x) >> OFFSET_S) & OFFSET_M)
+
 #define PCIE_INT_CAUSE_A	0x3004

 #define UNXSPLCPLERR_S	29
@@ -3038,6 +3044,10 @@
 #define HASHTIDSIZE_M	0x3fU
 #define HASHTIDSIZE_G(x)	(((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M)

+#define HASHTBLSIZE_S	3
+#define HASHTBLSIZE_M	0x1ffffU
+#define HASHTBLSIZE_G(x)	(((x) >> HASHTBLSIZE_S) & HASHTBLSIZE_M)
+
 #define LE_DB_HASH_TID_BASE_A 0x19c30
 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 3782e48dada2..f55105a4112f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -562,7 +562,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
			   unsigned int);
 void t4vf_free_sge_resources(struct adapter *);

-int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *);
 int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
		       const struct pkt_gl *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index a7641be9094f..dbe8ee7e0e21 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2916,6 +2916,39 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
 #endif
 };

+/**
+ * cxgb4vf_get_port_mask - Get the port mask for the VF based on the MAC
+ * addresses stored on the adapter
+ * @adapter: The adapter
+ *
+ * Find the port mask for the VF based on the index of the MAC
+ * address stored in the adapter.  If no MAC address is stored on
+ * the adapter for the VF, use the port mask received from the
+ * firmware.
+ */
+static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
+{
+	unsigned int naddr = 1, pidx = 0;
+	unsigned int pmask, rmask = 0;
+	u8 mac[ETH_ALEN];
+	int err;
+
+	pmask = adapter->params.vfres.pmask;
+	while (pmask) {
+		if (pmask & 1) {
+			err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
+			if (!err && !is_zero_ether_addr(mac))
+				rmask |= (1 << pidx);
+		}
+		pmask >>= 1;
+		pidx++;
+	}
+	if (!rmask)
+		rmask = adapter->params.vfres.pmask;
+
+	return rmask;
+}
+
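The probe path below uses this narrowed mask instead of the raw firmware one. The same narrowing logic, reduced to a runnable user-space sketch; has_mac_for_port() is a stand-in for the t4vf_get_vf_mac_acl() probe:

#include <stdio.h>

/* Pretend only port 1 has a MAC stored on the adapter. */
static int has_mac_for_port(unsigned int port)
{
	return port == 1;
}

/*
 * Walk the firmware-provided port mask bit by bit, keep only the ports
 * that pass the per-port check, and fall back to the original mask
 * when none do.
 */
static unsigned int narrow_port_mask(unsigned int pmask)
{
	unsigned int rmask = 0, pidx = 0, m = pmask;

	while (m) {
		if ((m & 1) && has_mac_for_port(pidx))
			rmask |= 1u << pidx;
		m >>= 1;
		pidx++;
	}
	return rmask ? rmask : pmask;
}

int main(void)
{
	printf("0x%x\n", narrow_port_mask(0x3));	/* prints 0x2 */
	return 0;
}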
 /*
  * "Probe" a device: initialize a device and construct all kernel and driver
  * state needed to manage the device.  This routine is called "init_one" in
@@ -2924,13 +2957,12 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
 {
-	int pci_using_dac;
-	int err, pidx;
-	unsigned int pmask;
 	struct adapter *adapter;
-	struct port_info *pi;
 	struct net_device *netdev;
-	unsigned int pf;
+	struct port_info *pi;
+	unsigned int pmask;
+	int pci_using_dac;
+	int err, pidx;

 	/*
	 * Initialize generic PCI device state.
@@ -3073,8 +3105,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	/*
	 * Allocate our "adapter ports" and stitch everything together.
	 */
-	pmask = adapter->params.vfres.pmask;
-	pf = t4vf_get_pf_from_vf(adapter);
+	pmask = cxgb4vf_get_port_mask(adapter);
 	for_each_port(adapter, pidx) {
 		int port_id, viid;
 		u8 mac[ETH_ALEN];
@@ -3157,7 +3188,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 			goto err_free_dev;
 		}

-		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
+		err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
 		if (err) {
 			dev_err(&pdev->dev,
				"unable to determine MAC ACL address, "
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8c3d6e11a4bf..95657da0aa4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1154,7 +1154,7 @@ static inline void txq_advance(struct sge_txq *tq, unsigned int n)
  *
  * Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
  */
-int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	u32 wr_mid;
 	u64 cntrl, *end;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 57cfd10a99ec..03777145efec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -415,7 +415,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int);
 int t4vf_update_port_info(struct port_info *pi);
 int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
 int t4vf_prep_adapter(struct adapter *);
-int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port,
			unsigned int *naddr, u8 *addr);
 int t4vf_get_vf_vlan_acl(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index a31b87390b50..cd8f9a481d73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2187,14 +2187,14 @@ int t4vf_prep_adapter(struct adapter *adapter)
  * t4vf_get_vf_mac_acl - Get the MAC address to be set to
  * the VI of this VF.
  * @adapter: The adapter
- * @pf: The pf associated with vf
+ * @port: The port associated with the VF
  * @naddr: the number of ACL MAC addresses returned in addr
  * @addr: Placeholder for MAC addresses
  *
  * Find the MAC address to be set to the VF's VI.  The requested MAC
  * address is from the host OS via callback in the PF driver.
  */
-int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port,
			unsigned int *naddr, u8 *addr)
 {
 	struct fw_acl_mac_cmd cmd;
@@ -2212,7 +2212,7 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
 	if (cmd.nmac < *naddr)
 		*naddr = cmd.nmac;

-	switch (pf) {
+	switch (port) {
 	case 3:
		memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
		break;
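The adapter.h and sge.c hunks above tighten t4vf_eth_xmit()'s return type from int to netdev_tx_t, the dedicated type for .ndo_start_xmit handlers, which lets the compiler flag handlers that leak raw error codes into the TX path. A skeletal example of the expected shape of such a handler; my_queue_has_room() and my_hw_enqueue() are hypothetical stand-ins, not driver functions:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-ins for driver-specific queue logic. */
static bool my_queue_has_room(struct net_device *dev);
static int my_hw_enqueue(struct net_device *dev, struct sk_buff *skb);

static netdev_tx_t my_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!my_queue_has_room(dev))
		return NETDEV_TX_BUSY;	/* core requeues and retries */

	if (my_hw_enqueue(dev, skb) < 0) {
		dev_kfree_skb_any(skb);	/* drop, but report "handled" */
		dev->stats.tx_dropped++;
	}
	return NETDEV_TX_OK;
}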