From 7b4ccb3c466f62bbf2f4dd5d6a143d945a6f3051 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 25 Apr 2017 19:36:25 +0200 Subject: soc: renesas: Provide dummy rcar_rst_read_mode_pins() for compile-testing If the R-Car RST driver is not included, compile-testing R-Car clock drivers fails with a link error: undefined reference to `rcar_rst_read_mode_pins' To fix this, provide a dummy version. Use the exact same test logic as in drivers/soc/renesas/Makefile, as there is no Kconfig symbol (yet) to control compilation of the R-Car RST driver. Fixes: 527c02f66d263d2e ("soc: renesas: Add R-Car RST driver") Signed-off-by: Geert Uytterhoeven Signed-off-by: Simon Horman --- include/linux/soc/renesas/rcar-rst.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h index a18e0783946b..787e7ad53d45 100644 --- a/include/linux/soc/renesas/rcar-rst.h +++ b/include/linux/soc/renesas/rcar-rst.h @@ -1,6 +1,11 @@ #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ #define __LINUX_SOC_RENESAS_RCAR_RST_H__ +#if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \ + defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796) int rcar_rst_read_mode_pins(u32 *mode); +#else +static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; } +#endif #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ -- cgit v1.2.3 From 9b3eb54106cf6acd03f07cf0ab01c13676a226c2 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 3 May 2017 16:43:19 +0200 Subject: xfrm: fix stack access out of bounds with CONFIG_XFRM_SUB_POLICY When CONFIG_XFRM_SUB_POLICY=y, xfrm_dst stores a copy of the flowi for that dst. Unfortunately, the code that allocates and fills this copy doesn't care about what type of flowi (flowi, flowi4, flowi6) gets passed. In multiple code paths (from raw_sendmsg, from TCP when replying to a FIN, in vxlan, geneve, and gre), the flowi that gets passed to xfrm is actually an on-stack flowi4, so we end up reading stuff from the stack past the end of the flowi4 struct. Since xfrm_dst->origin isn't used anywhere following commit ca116922afa8 ("xfrm: Eliminate "fl" and "pol" args to xfrm_bundle_ok()."), just get rid of it. xfrm_dst->partner isn't used either, so get rid of that too. 
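For illustration, a minimal user-space sketch of the access pattern being removed (the struct definitions below are simplified stand-ins, not the kernel ones: in the kernel, struct flowi is a union sized for the largest key, while struct flowi4 is the smaller IPv4-only variant):

#include <string.h>

struct flowi4 { unsigned int saddr, daddr; };                 /* small, IPv4-only   */
struct flowi  { struct flowi4 u4; unsigned char v6pad[32]; }; /* larger union type  */

/* Mirrors xfrm_dst_alloc_copy(): always copies sizeof(struct flowi) bytes. */
static void save_origin(struct flowi *origin, const struct flowi *fl)
{
	memcpy(origin, fl, sizeof(*fl));
}

int main(void)
{
	struct flowi4 fl4 = { .saddr = 1, .daddr = 2 };  /* on-stack flowi4 */
	struct flowi origin;

	/* Callers hand in &fl4 cast to struct flowi *, so the memcpy above
	 * reads past the end of fl4 -- exactly the out-of-bounds stack read
	 * that deleting xfrm_dst->origin eliminates. */
	save_origin(&origin, (const struct flowi *)&fl4);
	return 0;
}
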
Fixes: 9d6ec938019c ("ipv4: Use flowi4 in public route lookup interfaces.") Signed-off-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 10 ---------- net/xfrm/xfrm_policy.c | 47 ----------------------------------------------- 2 files changed, 57 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6793a30c66b1..7e7e2b0d2915 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -979,10 +979,6 @@ struct xfrm_dst { struct flow_cache_object flo; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; -#ifdef CONFIG_XFRM_SUB_POLICY - struct flowi *origin; - struct xfrm_selector *partner; -#endif u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; @@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) dst_release(xdst->route); if (likely(xdst->u.dst.xfrm)) xfrm_state_put(xdst->u.dst.xfrm); -#ifdef CONFIG_XFRM_SUB_POLICY - kfree(xdst->origin); - xdst->origin = NULL; - kfree(xdst->partner); - xdst->partner = NULL; -#endif } #endif diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index b00a1d5a7f52..ed4e52d95172 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1797,43 +1797,6 @@ free_dst: goto out; } -#ifdef CONFIG_XFRM_SUB_POLICY -static int xfrm_dst_alloc_copy(void **target, const void *src, int size) -{ - if (!*target) { - *target = kmalloc(size, GFP_ATOMIC); - if (!*target) - return -ENOMEM; - } - - memcpy(*target, src, size); - return 0; -} -#endif - -static int xfrm_dst_update_parent(struct dst_entry *dst, - const struct xfrm_selector *sel) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->partner), - sel, sizeof(*sel)); -#else - return 0; -#endif -} - -static int xfrm_dst_update_origin(struct dst_entry *dst, - const struct flowi *fl) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); -#else - return 0; -#endif -} - static int xfrm_expand_policies(const struct flowi *fl, u16 family, struct xfrm_policy **pols, int *num_pols, int *num_xfrms) @@ -1905,16 +1868,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, xdst = (struct xfrm_dst *)dst; xdst->num_xfrms = err; - if (num_pols > 1) - err = xfrm_dst_update_parent(dst, &pols[1]->selector); - else - err = xfrm_dst_update_origin(dst, fl); - if (unlikely(err)) { - dst_free(dst); - XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); - return ERR_PTR(err); - } - xdst->num_pols = num_pols; memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); xdst->policy_genid = atomic_read(&pols[0]->genid); -- cgit v1.2.3 From df3ed932394488e57e72dd0e73c224d1804fdc8f Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 11 May 2017 10:15:10 -0500 Subject: Partially Revert "of: fix sparse warnings in fdt, irq, reserved mem, and resolver code" A change to function pointers that was meant to address a sparse warning turned out to cause hundreds of new gcc-7 warnings: include/linux/of_irq.h:11:13: error: type qualifiers ignored on function return type [-Werror=ignored-qualifiers] drivers/of/of_reserved_mem.c: In function '__reserved_mem_init_node': drivers/of/of_reserved_mem.c:200:7: error: type qualifiers ignored on function return type [-Werror=ignored-qualifiers] int const (*initfn)(struct reserved_mem *rmem) = i->data; Turns out the sparse warnings were spurious and have been fixed in 
upstream sparse since 0.5.0 in commit "sparse: treat function pointers as pointers to const data". This partially reverts commit 17a70355ea576843a7ac851f1db26872a50b2850. Fixes: 17a70355ea57 ("of: fix sparse warnings in fdt, irq, reserved mem, and resolver code") Reported-by: Arnd Bergmann Signed-off-by: Rob Herring --- drivers/of/of_reserved_mem.c | 2 +- include/linux/of_irq.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 4dec07ea510f..d507c3569a88 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -197,7 +197,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem) const struct of_device_id *i; for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { - int const (*initfn)(struct reserved_mem *rmem) = i->data; + reservedmem_of_init_fn initfn = i->data; const char *compat = i->compatible; if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index ec6b11deb773..1e0deb8e8494 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -8,7 +8,7 @@ #include #include -typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *); +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); /* * Workarounds only applied to 32bit powermac machines -- cgit v1.2.3 From f5705aa8cfed142d980ecac12bee0d81b756479e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 13 May 2017 16:31:05 -0700 Subject: dax, xfs, ext4: compile out iomap-dax paths in the FS_DAX=n case Tetsuo reports: fs/built-in.o: In function `xfs_file_iomap_end': xfs_iomap.c:(.text+0xe0ef9): undefined reference to `put_dax' fs/built-in.o: In function `xfs_file_iomap_begin': xfs_iomap.c:(.text+0xe1a7f): undefined reference to `dax_get_by_host' make: *** [vmlinux] Error 1 $ grep DAX .config CONFIG_DAX=m # CONFIG_DEV_DAX is not set # CONFIG_FS_DAX is not set When FS_DAX=n we can/must throw away the dax code in filesystems. Implement 'fs_' versions of dax_get_by_host() and put_dax() that are nops in the FS_DAX=n case. Cc: Cc: Cc: Jan Kara Cc: "Theodore Ts'o" Cc: "Darrick J. 
Wong" Cc: Ross Zwisler Tested-by: Tony Luck Fixes: ef51042472f5 ("block, dax: move 'select DAX' from BLOCK to FS_DAX") Reported-by: Tetsuo Handa Signed-off-by: Dan Williams --- fs/ext2/inode.c | 4 ++-- fs/ext4/inode.c | 4 ++-- fs/xfs/xfs_iomap.c | 4 ++-- include/linux/dax.h | 34 +++++++++++++++++++++++++++------- 4 files changed, 33 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 26d77f9f8c12..2dcbd5698884 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -817,7 +817,7 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length, iomap->bdev = bdev; iomap->offset = (u64)first_block << blkbits; if (blk_queue_dax(bdev->bd_queue)) - iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); + iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); else iomap->dax_dev = NULL; @@ -841,7 +841,7 @@ static int ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length, ssize_t written, unsigned flags, struct iomap *iomap) { - put_dax(iomap->dax_dev); + fs_put_dax(iomap->dax_dev); if (iomap->type == IOMAP_MAPPED && written < length && (flags & IOMAP_WRITE)) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5834c4d76be8..1bd0bfa547f6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3412,7 +3412,7 @@ retry: bdev = inode->i_sb->s_bdev; iomap->bdev = bdev; if (blk_queue_dax(bdev->bd_queue)) - iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); + iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); else iomap->dax_dev = NULL; iomap->offset = first_block << blkbits; @@ -3447,7 +3447,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, int blkbits = inode->i_blkbits; bool truncate = false; - put_dax(iomap->dax_dev); + fs_put_dax(iomap->dax_dev); if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) return 0; diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index a63f61c256bd..94e5bdf7304c 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -1068,7 +1068,7 @@ xfs_file_iomap_begin( /* optionally associate a dax device with the iomap bdev */ bdev = iomap->bdev; if (blk_queue_dax(bdev->bd_queue)) - iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); + iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); else iomap->dax_dev = NULL; @@ -1149,7 +1149,7 @@ xfs_file_iomap_end( unsigned flags, struct iomap *iomap) { - put_dax(iomap->dax_dev); + fs_put_dax(iomap->dax_dev); if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, length, written, iomap); diff --git a/include/linux/dax.h b/include/linux/dax.h index 00ebac854bb7..5ec1f6c47716 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -18,6 +18,20 @@ struct dax_operations { void **, pfn_t *); }; +#if IS_ENABLED(CONFIG_DAX) +struct dax_device *dax_get_by_host(const char *host); +void put_dax(struct dax_device *dax_dev); +#else +static inline struct dax_device *dax_get_by_host(const char *host) +{ + return NULL; +} + +static inline void put_dax(struct dax_device *dax_dev) +{ +} +#endif + int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) int __bdev_dax_supported(struct super_block *sb, int blocksize); @@ -25,23 +39,29 @@ static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { return __bdev_dax_supported(sb, blocksize); } + +static inline struct dax_device *fs_dax_get_by_host(const char *host) +{ + return dax_get_by_host(host); +} + 
+static inline void fs_put_dax(struct dax_device *dax_dev) +{ + put_dax(dax_dev); +} + #else static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { return -EOPNOTSUPP; } -#endif -#if IS_ENABLED(CONFIG_DAX) -struct dax_device *dax_get_by_host(const char *host); -void put_dax(struct dax_device *dax_dev); -#else -static inline struct dax_device *dax_get_by_host(const char *host) +static inline struct dax_device *fs_dax_get_by_host(const char *host) { return NULL; } -static inline void put_dax(struct dax_device *dax_dev) +static inline void fs_put_dax(struct dax_device *dax_dev) { } #endif -- cgit v1.2.3 From 1b9a07ee25049724ab7f7c32282fbf5452530cea Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 10 May 2017 21:32:18 +0300 Subject: {net, IB}/mlx5: Replace mlx5_vzalloc with kvzalloc Commit a7c3e901a46f ("mm: introduce kv[mz]alloc helpers") added proper implementation of mlx5_vzalloc function to the MM core. This made the mlx5_vzalloc function useless, so let's remove it. Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/cq.c | 6 ++-- drivers/infiniband/hw/mlx5/mad.c | 4 +-- drivers/infiniband/hw/mlx5/main.c | 6 ++-- drivers/infiniband/hw/mlx5/mr.c | 2 +- drivers/infiniband/hw/mlx5/qp.c | 32 ++++++++++---------- drivers/infiniband/hw/mlx5/srq.c | 4 +-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 8 ++--- .../net/ethernet/mellanox/mlx5/core/en_common.c | 4 +-- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 24 ++++++--------- .../ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 33 ++++++++++----------- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 24 ++++++--------- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13 ++++----- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 6 ++-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10 +++---- drivers/net/ethernet/mellanox/mlx5/core/ipoib.c | 2 +- .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 4 +-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 6 ++-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/srq.c | 14 ++++----- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 34 +++++++++------------- include/linux/mlx5/driver.h | 5 ---- 27 files changed, 111 insertions(+), 144 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 94c049b62c2f..a384d72ea3cd 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont; - *cqb = mlx5_vzalloc(*inlen); + *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; goto err_db; @@ -884,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; - *cqb = mlx5_vzalloc(*inlen); + *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; goto err_buf; @@ -1314,7 +1314,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct 
ib_udata *udata) inlen = MLX5_ST_SZ_BYTES(modify_cq_in) + MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto ex_resize; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index f1b56de64871..95db929bdc34 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -218,7 +218,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, (struct ib_pma_portcounters_ext *)(out_mad->data + 40); int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); - out_cnt = mlx5_vzalloc(sz); + out_cnt = kvzalloc(sz, GFP_KERNEL); if (!out_cnt) return IB_MAD_RESULT_FAILURE; @@ -231,7 +231,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, (struct ib_pma_portcounters *)(out_mad->data + 40); int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - out_cnt = mlx5_vzalloc(sz); + out_cnt = kvzalloc(sz, GFP_KERNEL); if (!out_cnt) return IB_MAD_RESULT_FAILURE; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d45772da0963..b6991204e5df 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2263,7 +2263,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, if (!is_valid_attr(dev->mdev, flow_attr)) return ERR_PTR(-EINVAL); - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); handler = kzalloc(sizeof(*handler), GFP_KERNEL); if (!handler || !spec) { err = -ENOMEM; @@ -3456,7 +3456,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev, __be32 val; int ret, i; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -3485,7 +3485,7 @@ static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev, int ret, i; int offset = port->cnts.num_q_counters; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 366433f71b58..763bb5b36144 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1110,7 +1110,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*pas) * ((npages + 1) / 2) * 2; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_1; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 93959e1e43a3..d17aad0f54c0 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -823,7 +823,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; - *in = mlx5_vzalloc(*inlen); + *in = kvzalloc(*inlen, GFP_KERNEL); if (!*in) { err = -ENOMEM; goto err_umem; @@ -931,7 +931,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; - *in = mlx5_vzalloc(*inlen); + *in = kvzalloc(*inlen, GFP_KERNEL); if (!*in) { err = -ENOMEM; goto err_buf; @@ -1060,7 +1060,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, return err; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_umem; @@ -1140,7 +1140,7 
@@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, u32 rq_pas_size = get_rq_pas_size(qpc); inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1193,7 +1193,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1372,7 +1372,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1633,7 +1633,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (err) return err; } else { - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2164,7 +2164,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_tis_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2189,7 +2189,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_tis_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2434,7 +2434,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2479,7 +2479,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_sq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -4294,7 +4294,7 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(query_sq_out); - out = mlx5_vzalloc(inlen); + out = kvzalloc(inlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -4321,7 +4321,7 @@ static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(query_rq_out); - out = mlx5_vzalloc(inlen); + out = kvzalloc(inlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -4625,7 +4625,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, dev = to_mdev(pd->device); inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -4855,7 +4855,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, return ERR_PTR(-ENOMEM); inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err; @@ -4934,7 +4934,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, return -EOPNOTSUPP; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 7cb145f9a6db..43707b101f47 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, goto err_umem; } - in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont); + in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL); if (!in->pas) { err = -ENOMEM; goto err_umem; @@ -189,7 +189,7 @@ 
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, } mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift); - in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages); + in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL); if (!in->pas) { err = -ENOMEM; goto err_buf; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index e94a9532e218..de40b6cfee95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -405,7 +405,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *out; int err; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return param; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index c8a005326e30..f4017c06ddd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -180,9 +180,8 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec; int err = 0; - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); err = -ENOMEM; goto out; } @@ -237,7 +236,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in || !ft->g) { kvfree(ft->g); kvfree(in); @@ -481,9 +480,8 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, struct mlx5_flow_table *ft; int err = 0; - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); err = -ENOMEM; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index f1f17f7a3cd0..46e56ec4c26f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -65,7 +65,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *in; int err; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -147,7 +147,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ce7b09d72ff6..e0dd1048c966 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1045,7 +1045,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, (hfunc != ETH_RSS_HASH_TOP)) return -EINVAL; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 576d6787b484..936fc6d96c18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -218,11 +218,9 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec; int err = 0; - spec = mlx5_vzalloc(sizeof(*spec)); - if 
(!spec) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) return -ENOMEM; - } if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) mlx5e_vport_context_update_vlans(priv); @@ -660,11 +658,9 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec; int err = 0; - spec = mlx5_vzalloc(sizeof(*spec)); - if (!spec) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) return ERR_PTR(-ENOMEM); - } if (proto) { spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; @@ -742,7 +738,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) sizeof(*ft->g), GFP_KERNEL); if (!ft->g) return -ENOMEM; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { kfree(ft->g); return -ENOMEM; @@ -853,11 +849,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, u8 *mc_dmac; u8 *mv_dmac; - spec = mlx5_vzalloc(sizeof(*spec)); - if (!spec) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) return -ENOMEM; - } mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dmac_47_16); @@ -917,7 +911,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL); if (!ft->g) return -ENOMEM; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { kfree(ft->g); return -ENOMEM; @@ -1072,7 +1066,7 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int err; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 85bf4a389295..bdd82c9b3992 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -296,7 +296,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule; int err = 0; - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return ERR_PTR(-ENOMEM); err = set_flow_attrs(spec->match_criteria, spec->match_value, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a61b71b6fff3..edc485e489cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -252,9 +252,9 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) void *out; u32 *in; - in = mlx5_vzalloc(sz); + in = kvzalloc(sz, GFP_KERNEL); if (!in) - goto free_out; + return; MLX5_SET(ppcnt_reg, in, local_port, 1); @@ -288,7 +288,6 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_REG_PPCNT, 0, 0); } -free_out: kvfree(in); } @@ -314,7 +313,7 @@ static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) return; - in = mlx5_vzalloc(sz); + in = kvzalloc(sz, GFP_KERNEL); if (!in) return; @@ -503,7 +502,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, if (!MLX5E_VALID_NUM_MTTS(npages)) return -EINVAL; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -711,7 +710,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, inlen = 
MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rq->wq_ctrl.buf.npages; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -748,7 +747,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -776,7 +775,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -805,7 +804,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1134,7 +1133,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * csp->wq_ctrl->buf.npages; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1182,7 +1181,7 @@ static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, int err; inlen = MLX5_ST_SZ_BYTES(modify_sq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1496,7 +1495,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * cq->wq_ctrl.frag_buf.npages; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2091,7 +2090,7 @@ mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt) int i; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2210,7 +2209,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int err; inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2433,7 +2432,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) int ix; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2850,7 +2849,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) int tt; inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2889,7 +2888,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) int ix; inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 11c27e4fadf6..66a9bd635176 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1738,7 +1738,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, } flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); - parse_attr = mlx5_vzalloc(sizeof(*parse_attr)); + parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); if (!parse_attr || !flow) { err = -ENOMEM; goto err_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ea5d8d37a75c..df0034d8f48c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ 
-548,7 +548,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, inlen = MLX5_ST_SZ_BYTES(create_eq_in) + MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_buf; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 2e34d95ea776..81dfcd90b1f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -248,11 +248,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, if (rx_rule) match_header |= MLX5_MATCH_MISC_PARAMETERS; - spec = mlx5_vzalloc(sizeof(*spec)); - if (!spec) { - esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) return NULL; - } + dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dmac_47_16); dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -350,10 +349,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) return -EOPNOTSUPP; } - flow_group_in = mlx5_vzalloc(inlen); + flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; - memset(flow_group_in, 0, inlen); table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); @@ -961,7 +959,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, return -EOPNOTSUPP; } - flow_group_in = mlx5_vzalloc(inlen); + flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -1078,7 +1076,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, return -EOPNOTSUPP; } - flow_group_in = mlx5_vzalloc(inlen); + flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -1241,11 +1239,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", vport->vport, vport->info.vlan, vport->info.qos); - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { err = -ENOMEM; - esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", - vport->vport, err); goto out; } @@ -1322,11 +1318,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", vport->vport, vport->info.vlan, vport->info.qos); - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { err = -ENOMEM; - esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", - vport->vport, err); goto out; } @@ -2158,7 +2152,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, if (!LEGAL_VPORT(esw, vport)) return -EINVAL; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f991f669047e..3795943ef2d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -311,9 +311,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn struct mlx5_flow_spec *spec; void *misc; - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); flow_rule = ERR_PTR(-ENOMEM); 
goto out; } @@ -401,9 +400,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) struct mlx5_flow_spec *spec; int err = 0; - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); err = -ENOMEM; goto out; } @@ -488,7 +486,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) u32 *flow_group_in; esw_debug(esw->dev, "Create offloads FDB Tables\n"); - flow_group_in = mlx5_vzalloc(inlen); + flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -631,7 +629,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) int err = 0; int nvports = priv->sriov.num_vfs + 2; - flow_group_in = mlx5_vzalloc(inlen); + flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -675,9 +673,8 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) struct mlx5_flow_spec *spec; void *misc; - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - esw_warn(esw->dev, "Failed to alloc match parameters\n"); flow_rule = ERR_PTR(-ENOMEM); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 19e3d2fc2099..b27c59af9640 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -235,11 +235,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, u32 *in; int err; - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(dev, "failed to allocate inbox\n"); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) return -ENOMEM; - } MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); MLX5_SET(set_fte_in, in, op_mod, opmod); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index b8a176503d38..20a50f23fb1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -376,11 +376,9 @@ static void del_rule(struct fs_node *node) int err; bool update_fte = false; - match_value = mlx5_vzalloc(match_len); - if (!match_value) { - mlx5_core_warn(dev, "failed to allocate inbox\n"); + match_value = kvzalloc(match_len, GFP_KERNEL); + if (!match_value) return; - } fs_get_obj(rule, node); fs_get_obj(fte, rule->node.parent); @@ -1159,7 +1157,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, if (!ft->autogroup.active) return ERR_PTR(-ENOENT); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return ERR_PTR(-ENOMEM); @@ -1778,7 +1776,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering struct mlx5_flow_namespace *ns; /* Create the root namespace */ - root_ns = mlx5_vzalloc(sizeof(*root_ns)); + root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c index 019c230da498..9b397fe3f159 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c @@ -98,7 +98,7 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core void *qpc; inlen = MLX5_ST_SZ_BYTES(create_qp_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index a57d5a81eb05..efcded7ca27a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -279,7 +279,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int i; inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); @@ -376,7 +376,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, *nclaimed = 0; outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 141583daf5a2..1975d4388d4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -47,8 +47,8 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, u32 *in = NULL; void *data; - in = mlx5_vzalloc(inlen); - out = mlx5_vzalloc(outlen); + in = kvzalloc(inlen, GFP_KERNEL); + out = kvzalloc(outlen, GFP_KERNEL); if (!in || !out) goto out; @@ -454,7 +454,7 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u32 *in; int err; - in = mlx5_vzalloc(sz); + in = kvzalloc(sz, GFP_KERNEL); if (!in) { err = -ENOMEM; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index cbbcef2884be..573a6b27fed8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -527,7 +527,7 @@ int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, void *out; int err; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 3099630015d7..f774de6f5fcb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -162,7 +162,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size; - create_in = mlx5_vzalloc(inlen); + create_in = kvzalloc(inlen, GFP_KERNEL); if (!create_in) return -ENOMEM; @@ -221,7 +221,7 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, void *srqc; int err; - srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out)); + srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL); if (!srq_out) return -ENOMEM; @@ -256,7 +256,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; - create_in = mlx5_vzalloc(inlen); + create_in = kvzalloc(inlen, GFP_KERNEL); if (!create_in) return -ENOMEM; @@ -320,7 +320,7 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, void *xrc_srqc; int err; - xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL); if (!xrcsrq_out) return -ENOMEM; memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); @@ -357,7 +357,7 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, pas_size = get_pas_size(in); inlen = 
MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; - create_in = mlx5_vzalloc(inlen); + create_in = kvzalloc(inlen, GFP_KERNEL); if (!create_in) return -ENOMEM; @@ -390,7 +390,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev, void *bitmask; int err; - in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL); if (!in) return -ENOMEM; @@ -417,7 +417,7 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, void *rmpc; int err; - rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); + rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL); if (!rmp_out) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index a00ff49eec18..5e128d7a9ffd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -284,7 +284,7 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) void *bitmask; int err; - in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 15c2294dd2b4..06019d00ab7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -172,7 +172,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *out_addr; int err; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -197,11 +197,9 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, void *nic_vport_ctx; u8 *perm_mac; - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(mdev, "failed to allocate inbox\n"); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) return -ENOMEM; - } MLX5_SET(modify_nic_vport_context_in, in, field_select.permanent_address, 1); @@ -231,7 +229,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) u32 *out; int err; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -251,7 +249,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) void *in; int err; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -501,7 +499,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -521,7 +519,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -551,7 +549,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) return -EOPNOTSUPP; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -577,7 +575,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -879,11 +877,9 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, int inlen = 
MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); int err; - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_err(mdev, "failed to allocate inbox\n"); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) return -ENOMEM; - } MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1); MLX5_SET(modify_nic_vport_context_in, in, @@ -913,11 +909,9 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev, int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); int err; - in = mlx5_vzalloc(inlen); - if (!in) { - mlx5_core_warn(mdev, "failed to allocate inbox\n"); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) return -ENOMEM; - } MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en, @@ -952,7 +946,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, int err; is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); - in = mlx5_vzalloc(in_sz); + in = kvzalloc(in_sz, GFP_KERNEL); if (!in) { err = -ENOMEM; return err; } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index bcdf739ee41a..c2740688d679 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -890,11 +890,6 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } -static inline void *mlx5_vzalloc(unsigned long size) -{ - return kvzalloc(size, GFP_KERNEL); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; -- cgit v1.2.3 From 0179720d6be2096b8d0a4d143254ff9e77747daa Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Sun, 7 May 2017 13:48:31 +0300 Subject: net/mlx5: Introduce trigger_health_work function Introduce a new function for entering the bad-health state. This function will be called, in a later patch, from FPGA-related logic in asynchronous event (IRQ) context; to support that, change the spin lock to an IRQ-safe one.
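As background for the lock conversion (a generic sketch of the pattern, not code from this patch): once a lock can be taken from hard-IRQ context, every acquisition must disable local interrupts, otherwise an IRQ that fires while the lock is held on the same CPU would spin on it forever. spin_lock_irqsave()/spin_unlock_irqrestore() save and restore the interrupt state, so the same helper is safe from both contexts:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(wq_lock);   /* stands in for health->wq_lock */

static void touch_health_state(void)
{
	unsigned long flags;

	/* Safe from process context *and* from an IRQ handler: local IRQs
	 * are disabled while the lock is held, and their previous state is
	 * restored afterwards. */
	spin_lock_irqsave(&wq_lock, flags);
	/* ... queue health work or mark new work as dropped ... */
	spin_unlock_irqrestore(&wq_lock, flags);
}
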
Signed-off-by: Ilan Tayari Reviewed-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 32 ++++++++++++++++-------- include/linux/mlx5/driver.h | 1 + 2 files changed, 22 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d0515391d33b..c3cedb6cec3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -185,6 +185,7 @@ static void health_care(struct work_struct *work) struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; + unsigned long flags; health = container_of(work, struct mlx5_core_health, work); priv = container_of(health, struct mlx5_priv, health); @@ -192,13 +193,13 @@ static void health_care(struct work_struct *work) mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev); - spin_lock(&health->wq_lock); + spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) schedule_delayed_work(&health->recover_work, recover_delay); else dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n"); - spin_unlock(&health->wq_lock); + spin_unlock_irqrestore(&health->wq_lock, flags); } static const char *hsynd_str(u8 synd) @@ -269,6 +270,20 @@ static unsigned long get_next_poll_jiffies(void) return next; } +void mlx5_trigger_health_work(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags; + + spin_lock_irqsave(&health->wq_lock, flags); + if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) + queue_work(health->wq, &health->work); + else + dev_err(&dev->pdev->dev, + "new health works are not permitted at this stage\n"); + spin_unlock_irqrestore(&health->wq_lock, flags); +} + static void poll_health(unsigned long data) { struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; @@ -297,13 +312,7 @@ static void poll_health(unsigned long data) if (in_fatal(dev) && !health->sick) { health->sick = true; print_health_info(dev); - spin_lock(&health->wq_lock); - if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) - queue_work(health->wq, &health->work); - else - dev_err(&dev->pdev->dev, - "new health works are not permitted at this stage\n"); - spin_unlock(&health->wq_lock); + mlx5_trigger_health_work(dev); } } @@ -333,10 +342,11 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev) void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags; - spin_lock(&health->wq_lock); + spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); - spin_unlock(&health->wq_lock); + spin_unlock_irqrestore(&health->wq_lock, flags); cancel_delayed_work_sync(&health->recover_work); cancel_work_sync(&health->work); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index c2740688d679..a277bb36c21f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -915,6 +915,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); +void mlx5_trigger_health_work(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct 
mlx5_core_dev *dev, int size, struct mlx5_buf *buf); -- cgit v1.2.3 From e29341fb3a5b885a4bb5b9a38f2814ca07d3382c Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Mon, 13 Mar 2017 20:05:45 +0200 Subject: net/mlx5: FPGA, Add basic support for Innova Mellanox Innova is a NIC with ConnectX and an FPGA on the same board. The FPGA is a bump-on-the-wire and thus affects operation of the mlx5_core driver on the ConnectX ASIC. Add basic support for Innova in mlx5_core. This allows using the Innova card as a regular NIC, by detecting the FPGA capability bit, and verifying its load state before initializing ConnectX interfaces. Also detect FPGA fatal runtime failures and enter error state if they ever happen. All new FPGA-related logic is placed in its own subdirectory 'fpga', which may be built by selecting CONFIG_MLX5_FPGA. This prepares for further support of various Innova features in later patchsets. Additional details about hardware architecture will be provided as more features get submitted. Signed-off-by: Ilan Tayari Reviewed-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- MAINTAINERS | 10 + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10 + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3 + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 11 ++ drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | 64 +++++++ drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 59 ++++++ .../net/ethernet/mellanox/mlx5/core/fpga/core.c | 202 +++++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/fpga/core.h | 99 ++++++++++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 19 +- include/linux/mlx5/device.h | 6 + include/linux/mlx5/driver.h | 5 + include/linux/mlx5/mlx5_ifc.h | 11 +- include/linux/mlx5/mlx5_ifc_fpga.h | 144 +++++++++++++++ 13 files changed, 640 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h create mode 100644 include/linux/mlx5/mlx5_ifc_fpga.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index f7d568b8f133..374ebf1b5d5d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8304,6 +8304,16 @@ W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_* +MELLANOX ETHERNET INNOVA DRIVER +M: Ilan Tayari +R: Boris Pismenny +L: netdev@vger.kernel.org +S: Supported +W: http://www.mellanox.com +Q: http://patchwork.ozlabs.org/project/netdev/list/ +F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* +F: include/linux/mlx5/mlx5_ifc_fpga.h + MELLANOX ETHERNET SWITCH DRIVERS M: Jiri Pirko M: Ido Schimmel diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index fc52d742b7f7..28cf88483ca4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -11,6 +11,16 @@ config MLX5_CORE Core driver for low level functionality of the ConnectX-4 and Connect-IB cards by Mellanox Technologies. +config MLX5_FPGA + bool "Mellanox Technologies Innova support" + depends on MLX5_CORE + ---help--- + Build support for the Innova family of network cards by Mellanox + Technologies. Innova network cards are comprised of a ConnectX chip + and an FPGA chip on one board. 
If you select this option, the + mlx5_core driver will include the Innova FPGA core and allow building + sandbox-specific client drivers. + config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9e644615f07a..12556c03eec4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -1,10 +1,13 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o +subdir-ccflags-y += -I$(src) mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ fs_counters.o rl.o lag.o dev.o +mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o + mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index df0034d8f48c..01d2cd7e4746 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -35,6 +35,7 @@ #include #include #include "mlx5_core.h" +#include "fpga/core.h" #ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" #endif @@ -156,6 +157,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PAGE_FAULT"; case MLX5_EVENT_TYPE_PPS_EVENT: return "MLX5_EVENT_TYPE_PPS_EVENT"; + case MLX5_EVENT_TYPE_FPGA_ERROR: + return "MLX5_EVENT_TYPE_FPGA_ERROR"; default: return "Unrecognized event"; } @@ -476,6 +479,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) if (dev->event) dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe); break; + + case MLX5_EVENT_TYPE_FPGA_ERROR: + mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); + break; + default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -693,6 +701,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, pps)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); + if (MLX5_CAP_GEN(dev, fpga)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c new file mode 100644 index 000000000000..99cba644b4fc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include "mlx5_core.h" +#include "fpga/cmd.h" + +int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps) +{ + u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; + + return mlx5_core_access_reg(dev, in, sizeof(in), caps, + MLX5_ST_SZ_BYTES(fpga_cap), + MLX5_REG_FPGA_CAP, 0, 0); +} + +int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query) +{ + u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0}; + u32 out[MLX5_ST_SZ_DW(fpga_ctrl)]; + int err; + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_FPGA_CTRL, 0, false); + if (err) + return err; + + query->status = MLX5_GET(fpga_ctrl, out, status); + query->admin_image = MLX5_GET(fpga_ctrl, out, flash_select_admin); + query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h new file mode 100644 index 000000000000..a74396a61bc3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_FPGA_H__ +#define __MLX5_FPGA_H__ + +#include + +enum mlx5_fpga_image { + MLX5_FPGA_IMAGE_USER = 0, + MLX5_FPGA_IMAGE_FACTORY, +}; + +enum mlx5_fpga_status { + MLX5_FPGA_STATUS_SUCCESS = 0, + MLX5_FPGA_STATUS_FAILURE = 1, + MLX5_FPGA_STATUS_IN_PROGRESS = 2, + MLX5_FPGA_STATUS_NONE = 0xFFFF, +}; + +struct mlx5_fpga_query { + enum mlx5_fpga_image admin_image; + enum mlx5_fpga_image oper_image; + enum mlx5_fpga_status status; +}; + +int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps); +int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); + +#endif /* __MLX5_FPGA_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c new file mode 100644 index 000000000000..d88b332e9669 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include + +#include "mlx5_core.h" +#include "fpga/core.h" + +static const char *const mlx5_fpga_error_strings[] = { + "Null Syndrome", + "Corrupted DDR", + "Flash Timeout", + "Internal Link Error", + "Watchdog HW Failure", + "I2C Failure", + "Image Changed", + "Temperature Critical", +}; + +static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void) +{ + struct mlx5_fpga_device *fdev = NULL; + + fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); + if (!fdev) + return NULL; + + spin_lock_init(&fdev->state_lock); + fdev->state = MLX5_FPGA_STATUS_NONE; + return fdev; +} + +static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image) +{ + switch (image) { + case MLX5_FPGA_IMAGE_USER: + return "user"; + case MLX5_FPGA_IMAGE_FACTORY: + return "factory"; + default: + return "unknown"; + } +} + +static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) +{ + struct mlx5_fpga_query query; + int err; + + err = mlx5_fpga_query(fdev->mdev, &query); + if (err) { + mlx5_fpga_err(fdev, "Failed to query status: %d\n", err); + return err; + } + + fdev->last_admin_image = query.admin_image; + fdev->last_oper_image = query.oper_image; + + mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n", + query.status, query.admin_image, query.oper_image); + + if (query.status != MLX5_FPGA_STATUS_SUCCESS) { + mlx5_fpga_err(fdev, "%s image failed to load; status %u\n", + mlx5_fpga_image_name(fdev->last_oper_image), + query.status); + return -EIO; + } + + return 0; +} + +int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) +{ + struct mlx5_fpga_device *fdev = mdev->fpga; + unsigned long flags; + int err; + + if (!fdev) + return 0; + + err = mlx5_fpga_device_load_check(fdev); + if (err) + goto out; + + err = mlx5_fpga_caps(fdev->mdev, + fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]); + if (err) + goto out; + + mlx5_fpga_info(fdev, "device %u; %s image, version %u\n", + MLX5_CAP_FPGA(fdev->mdev, fpga_device), + mlx5_fpga_image_name(fdev->last_oper_image), + MLX5_CAP_FPGA(fdev->mdev, image_version)); + +out: + spin_lock_irqsave(&fdev->state_lock, flags); + fdev->state = err ? 
MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS; + spin_unlock_irqrestore(&fdev->state_lock, flags); + return err; +} + +int mlx5_fpga_device_init(struct mlx5_core_dev *mdev) +{ + struct mlx5_fpga_device *fdev = NULL; + + if (!MLX5_CAP_GEN(mdev, fpga)) { + mlx5_core_dbg(mdev, "FPGA capability not present\n"); + return 0; + } + + mlx5_core_dbg(mdev, "Initializing FPGA\n"); + + fdev = mlx5_fpga_device_alloc(); + if (!fdev) + return -ENOMEM; + + fdev->mdev = mdev; + mdev->fpga = fdev; + + return 0; +} + +void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev) +{ + kfree(mdev->fpga); + mdev->fpga = NULL; +} + +static const char *mlx5_fpga_syndrome_to_string(u8 syndrome) +{ + if (syndrome < ARRAY_SIZE(mlx5_fpga_error_strings)) + return mlx5_fpga_error_strings[syndrome]; + return "Unknown"; +} + +void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) +{ + struct mlx5_fpga_device *fdev = mdev->fpga; + const char *event_name; + bool teardown = false; + unsigned long flags; + u8 syndrome; + + if (event != MLX5_EVENT_TYPE_FPGA_ERROR) { + mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n", + event); + return; + } + + syndrome = MLX5_GET(fpga_error_event, data, syndrome); + event_name = mlx5_fpga_syndrome_to_string(syndrome); + + spin_lock_irqsave(&fdev->state_lock, flags); + switch (fdev->state) { + case MLX5_FPGA_STATUS_SUCCESS: + mlx5_fpga_warn(fdev, "Error %u: %s\n", syndrome, event_name); + teardown = true; + break; + default: + mlx5_fpga_warn_ratelimited(fdev, "Unexpected error event %u: %s\n", + syndrome, event_name); + } + spin_unlock_irqrestore(&fdev->state_lock, flags); + /* We tear-down the card's interfaces and functionality because + * the FPGA bump-on-the-wire is misbehaving and we lose ability + * to communicate with the network. User may still be able to + * recover by re-programming or debugging the FPGA + */ + if (teardown) + mlx5_trigger_health_work(fdev->mdev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h new file mode 100644 index 000000000000..c55044d66778 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_FPGA_CORE_H__ +#define __MLX5_FPGA_CORE_H__ + +#ifdef CONFIG_MLX5_FPGA + +#include "fpga/cmd.h" + +/* Represents an Innova device */ +struct mlx5_fpga_device { + struct mlx5_core_dev *mdev; + spinlock_t state_lock; /* Protects state transitions */ + enum mlx5_fpga_status state; + enum mlx5_fpga_image last_admin_image; + enum mlx5_fpga_image last_oper_image; +}; + +#define mlx5_fpga_dbg(__adev, format, ...) \ + dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define mlx5_fpga_err(__adev, format, ...) \ + dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define mlx5_fpga_warn(__adev, format, ...) \ + dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define mlx5_fpga_warn_ratelimited(__adev, format, ...) \ + dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \ + format, __func__, __LINE__, ##__VA_ARGS__) + +#define mlx5_fpga_notice(__adev, format, ...) \ + dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__) + +#define mlx5_fpga_info(__adev, format, ...) \ + dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__) + +int mlx5_fpga_device_init(struct mlx5_core_dev *mdev); +void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev); +int mlx5_fpga_device_start(struct mlx5_core_dev *mdev); +void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data); + +#else + +static inline int mlx5_fpga_device_init(struct mlx5_core_dev *mdev) +{ + return 0; +} + +static inline void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev) +{ +} + +static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) +{ + return 0; +} + +static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, + void *data) +{ +} + +#endif + +#endif /* __MLX5_FPGA_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f933922d5cca..ad0202cef203 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -56,6 +56,7 @@ #ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" #endif +#include "fpga/core.h" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); @@ -1113,10 +1114,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_disable_msix; } + err = mlx5_fpga_device_init(dev); + if (err) { + dev_err(&pdev->dev, "fpga device init failed %d\n", err); + goto err_put_uars; + } + err = mlx5_start_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); - goto err_put_uars; + goto err_fpga_init; } err = alloc_comp_eqs(dev); @@ -1147,6 +1154,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_sriov; } + err = mlx5_fpga_device_start(dev); + if (err) { + dev_err(&pdev->dev, "fpga device start failed %d\n", err); + goto err_reg_dev; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1182,6 +1195,9 @@ err_affinity_hints: err_stop_eqs: mlx5_stop_eqs(dev); +err_fpga_init: 
+ mlx5_fpga_device_cleanup(dev); + err_put_uars: mlx5_put_uars_page(dev, priv->uar); @@ -1246,6 +1262,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); + mlx5_fpga_device_cleanup(dev); mlx5_put_uars_page(dev, priv->uar); mlx5_disable_msix(dev); if (cleanup) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index dd9a263ed368..786a43843da9 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -300,6 +300,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, + + MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, }; enum { @@ -967,6 +969,7 @@ enum mlx5_cap_type { MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, + MLX5_CAP_FPGA, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1088,6 +1091,9 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) +#define MLX5_CAP_FPGA(mdev, cap) \ + MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a277bb36c21f..55bb712643cb 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -108,6 +108,8 @@ enum { MLX5_REG_QTCT = 0x400a, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, + MLX5_REG_FPGA_CAP = 0x4022, + MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -761,6 +763,9 @@ struct mlx5_core_dev { atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; +#ifdef CONFIG_MLX5_FPGA + struct mlx5_fpga_device *fpga; +#endif #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 32de0724b400..6fa1eb6766af 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -32,6 +32,8 @@ #ifndef MLX5_IFC_H #define MLX5_IFC_H +#include "mlx5_ifc_fpga.h" + enum { MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, @@ -56,7 +58,8 @@ enum { MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, - MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb + MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, + MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, }; enum { @@ -854,7 +857,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_tc[0x4]; u8 reserved_at_1d0[0x1]; u8 dcbx[0x1]; - u8 reserved_at_1d2[0x4]; + u8 reserved_at_1d2[0x3]; + u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_at_1d8[0x1]; @@ -2186,6 +2190,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; + struct mlx5_ifc_fpga_cap_bits fpga_cap; u8 reserved_at_0[0x8000]; }; @@ -8182,6 +8187,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_sltp_reg_bits sltp_reg; struct mlx5_ifc_mtpps_reg_bits mtpps_reg; struct mlx5_ifc_mtppse_reg_bits mtppse_reg; + struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; + struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h new file mode 100644 index 000000000000..0032d10ac6cf --- /dev/null +++ 
b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef MLX5_IFC_FPGA_H +#define MLX5_IFC_FPGA_H + +struct mlx5_ifc_fpga_shell_caps_bits { + u8 max_num_qps[0x10]; + u8 reserved_at_10[0x8]; + u8 total_rcv_credits[0x8]; + + u8 reserved_at_20[0xe]; + u8 qp_type[0x2]; + u8 reserved_at_30[0x5]; + u8 rae[0x1]; + u8 rwe[0x1]; + u8 rre[0x1]; + u8 reserved_at_38[0x4]; + u8 dc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 reserved_at_40[0x1a]; + u8 log_ddr_size[0x6]; + + u8 max_fpga_qp_msg_size[0x20]; + + u8 reserved_at_80[0x180]; +}; + +struct mlx5_ifc_fpga_cap_bits { + u8 fpga_id[0x8]; + u8 fpga_device[0x18]; + + u8 register_file_ver[0x20]; + + u8 fpga_ctrl_modify[0x1]; + u8 reserved_at_41[0x5]; + u8 access_reg_query_mode[0x2]; + u8 reserved_at_48[0x6]; + u8 access_reg_modify_mode[0x2]; + u8 reserved_at_50[0x10]; + + u8 reserved_at_60[0x20]; + + u8 image_version[0x20]; + + u8 image_date[0x20]; + + u8 image_time[0x20]; + + u8 shell_version[0x20]; + + u8 reserved_at_100[0x80]; + + struct mlx5_ifc_fpga_shell_caps_bits shell_caps; + + u8 reserved_at_380[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 sandbox_product_version[0x10]; + u8 sandbox_product_id[0x10]; + + u8 sandbox_basic_caps[0x20]; + + u8 reserved_at_3e0[0x10]; + u8 sandbox_extended_caps_len[0x10]; + + u8 sandbox_extended_caps_addr[0x40]; + + u8 fpga_ddr_start_addr[0x40]; + + u8 fpga_cr_space_start_addr[0x40]; + + u8 fpga_ddr_size[0x20]; + + u8 fpga_cr_space_size[0x20]; + + u8 reserved_at_500[0x300]; +}; + +struct mlx5_ifc_fpga_ctrl_bits { + u8 reserved_at_0[0x8]; + u8 operation[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 reserved_at_20[0x8]; + u8 flash_select_admin[0x8]; + u8 reserved_at_30[0x8]; + u8 flash_select_oper[0x8]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1, + MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2, + MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3, + MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4, + MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5, + MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6, + 
MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7, +}; + +struct mlx5_ifc_fpga_error_event_bits { + u8 reserved_at_0[0x40]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x80]; +}; + +#endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From d91fc59cd77c719f33eda65c194ad8f95a055190 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sun, 7 May 2017 22:01:55 +0800 Subject: netfilter: introduce nf_conntrack_helper_put helper function And convert module_put invocation to nf_conntrack_helper_put, this is prepared for the followup patch, which will add a refcnt for cthelper, so we can reject the deleting request when cthelper is in use. Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_helper.h | 2 ++ net/netfilter/nf_conntrack_helper.c | 6 ++++++ net/netfilter/nft_ct.c | 4 ++-- net/netfilter/xt_CT.c | 6 +++--- net/openvswitch/conntrack.c | 4 ++-- 5 files changed, 15 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index e04fa7691e5d..c1c12411103a 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -79,6 +79,8 @@ struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name, struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum); +void nf_conntrack_helper_put(struct nf_conntrack_helper *helper); + void nf_ct_helper_init(struct nf_conntrack_helper *helper, u16 l3num, u16 protonum, const char *name, u16 default_port, u16 spec_port, u32 id, diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 3a60efa7799b..e17006b6e434 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -181,6 +181,12 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) } EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); +void nf_conntrack_helper_put(struct nf_conntrack_helper *helper) +{ + module_put(helper->me); +} +EXPORT_SYMBOL_GPL(nf_conntrack_helper_put); + struct nf_conn_help * nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a34ceb38fc55..1678e9e75e8e 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -826,9 +826,9 @@ static void nft_ct_helper_obj_destroy(struct nft_object *obj) struct nft_ct_helper_obj *priv = nft_obj_data(obj); if (priv->helper4) - module_put(priv->helper4->me); + nf_conntrack_helper_put(priv->helper4); if (priv->helper6) - module_put(priv->helper6->me); + nf_conntrack_helper_put(priv->helper6); } static void nft_ct_helper_obj_eval(struct nft_object *obj, diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index bb7ad82dcd56..623ef37de886 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL); if (help == NULL) { - module_put(helper->me); + nf_conntrack_helper_put(helper); return -ENOMEM; } @@ -263,7 +263,7 @@ out: err4: help = nfct_help(ct); if (help) - module_put(help->helper->me); + nf_conntrack_helper_put(help->helper); err3: nf_ct_tmpl_free(ct); err2: @@ -346,7 +346,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par, if (ct) { help = nfct_help(ct); if (help) - module_put(help->helper->me); + 
nf_conntrack_helper_put(help->helper); nf_ct_netns_put(par->net, par->family); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index bf602e33c40a..08679ebb3068 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1123,7 +1123,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name, help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL); if (!help) { - module_put(helper->me); + nf_conntrack_helper_put(helper); return -ENOMEM; } @@ -1584,7 +1584,7 @@ void ovs_ct_free_action(const struct nlattr *a) static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) { if (ct_info->helper) - module_put(ct_info->helper->me); + nf_conntrack_helper_put(ct_info->helper); if (ct_info->ct) nf_ct_tmpl_free(ct_info->ct); } -- cgit v1.2.3 From 9338d7b4418e9996a7642867d8f6b482a6040ed6 Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sun, 7 May 2017 22:01:56 +0800 Subject: netfilter: nfnl_cthelper: reject del request if helper obj is in use We can still delete the ct helper even if it is in use, this will cause a use-after-free error. In more detail, I mean: # nfct helper add ssdp inet udp # iptables -t raw -A OUTPUT -p udp -j CT --helper ssdp # nfct helper delete ssdp //--> oops, succeed! BUG: unable to handle kernel paging request at 000026ca IP: 0x26ca [...] Call Trace: ? ipv4_helper+0x62/0x80 [nf_conntrack_ipv4] nf_hook_slow+0x21/0xb0 ip_output+0xe9/0x100 ? ip_fragment.constprop.54+0xc0/0xc0 ip_local_out+0x33/0x40 ip_send_skb+0x16/0x80 udp_send_skb+0x84/0x240 udp_sendmsg+0x35d/0xa50 So add reference count to fix this issue, if ct helper is used by others, reject the delete request. Apply this patch: # nfct helper delete ssdp nfct v1.4.3: netlink error: Device or resource busy Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_helper.h | 2 ++ net/netfilter/nf_conntrack_helper.c | 6 ++++++ net/netfilter/nfnetlink_cthelper.c | 17 +++++++++++------ 3 files changed, 19 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index c1c12411103a..c519bb5b5bb8 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -9,6 +9,7 @@ #ifndef _NF_CONNTRACK_HELPER_H #define _NF_CONNTRACK_HELPER_H +#include #include #include #include @@ -26,6 +27,7 @@ struct nf_conntrack_helper { struct hlist_node hnode; /* Internal use. 
*/ char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */ + refcount_t refcnt; struct module *me; /* pointer to self */ const struct nf_conntrack_expect_policy *expect_policy; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index e17006b6e434..7f6100ca63be 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -174,6 +174,10 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) #endif if (h != NULL && !try_module_get(h->me)) h = NULL; + if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) { + module_put(h->me); + h = NULL; + } rcu_read_unlock(); @@ -183,6 +187,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); void nf_conntrack_helper_put(struct nf_conntrack_helper *helper) { + refcount_dec(&helper->refcnt); module_put(helper->me); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_put); @@ -423,6 +428,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) } } } + refcount_set(&me->refcnt, 1); hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); nf_ct_helper_count++; out: diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 950bf6eadc65..be678a323598 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -686,6 +686,7 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, tuple_set = true; } + ret = -ENOENT; list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { cur = &nlcth->helper; j++; @@ -699,16 +700,20 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, tuple.dst.protonum != cur->tuple.dst.protonum)) continue; - found = true; - nf_conntrack_helper_unregister(cur); - kfree(cur->expect_policy); + if (refcount_dec_if_one(&cur->refcnt)) { + found = true; + nf_conntrack_helper_unregister(cur); + kfree(cur->expect_policy); - list_del(&nlcth->list); - kfree(nlcth); + list_del(&nlcth->list); + kfree(nlcth); + } else { + ret = -EBUSY; + } } /* Make sure we return success if we flush and there is no helpers */ - return (found || j == 0) ? 0 : -ENOENT; + return (found || j == 0) ? 0 : ret; } static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { -- cgit v1.2.3 From 324318f0248c31be8a08984146e7e4dd7cdd091d Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 9 May 2017 16:17:37 -0400 Subject: netfilter: xtables: zero padding in data_to_user When looking up an iptables rule, the iptables binary compares the aligned match and target data (XT_ALIGN). In some cases this can exceed the actual data size to include padding bytes. Before commit f77bc5b23fb1 ("iptables: use match, target and data copy_to_user helpers") the malloc()ed bytes were overwritten by the kernel with kzalloced contents, zeroing the padding and making the comparison succeed. After this patch, the kernel copies and clears only data, leaving the padding bytes undefined. Extend the clear operation from data size to aligned data size to include the padding bytes, if any. 
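As an editorial aside (not part of the original patch), here is a minimal user-space sketch of the arithmetic involved. XT_ALIGN below is a stand-in for the kernel macro (which aligns to struct _xt_align) and assumes 8-byte alignment; the buffer and sizes are hypothetical:

#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's XT_ALIGN(); assumes 8-byte alignment. */
#define XT_ALIGN(s) (((s) + 7UL) & ~7UL)

int main(void)
{
	int usersize = 10, size = 13;	/* hypothetical match data sizes */
	unsigned char dst[XT_ALIGN(13)];

	memset(dst, 0xAA, sizeof(dst));		/* stale buffer contents */
	memset(dst, 0x00, usersize);		/* what the copy defines */
	memset(dst + usersize, 0x00, size - usersize);	/* pre-patch clear */

	/* Userspace compares XT_ALIGN(size) == 16 bytes, so the three
	 * padding bytes in [13, 16) still hold 0xAA and the comparison
	 * fails; clearing up to the aligned size (the fix) zeroes them.
	 */
	printf("last padding byte: %#x\n", dst[XT_ALIGN(size) - 1]);
	return 0;
}
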
Padding bytes can be observed in both match and target, and the bug triggered, by issuing a rule with match icmp and target ACCEPT: iptables -t mangle -A INPUT -i lo -p icmp --icmp-type 1 -j ACCEPT iptables -t mangle -D INPUT -i lo -p icmp --icmp-type 1 -j ACCEPT Fixes: f77bc5b23fb1 ("iptables: use match, target and data copy_to_user helpers") Reported-by: Paul Moore Reported-by: Richard Guy Briggs Signed-off-by: Willem de Bruijn Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 2 +- net/bridge/netfilter/ebtables.c | 9 ++++++--- net/netfilter/x_tables.c | 9 ++++++--- 3 files changed, 13 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index be378cf47fcc..b3044c2c62cb 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m, int xt_target_to_user(const struct xt_entry_target *t, struct xt_entry_target __user *u); int xt_data_to_user(void __user *dst, const void *src, - int usersize, int size); + int usersize, int size, int aligned_size); void *xt_copy_counters_from_user(const void __user *user, unsigned int len, struct xt_counters_info *info, bool compat); diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 9ec0c9f908fa..9c6e619f452b 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name, strlcpy(name, _name, sizeof(name)); if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || - xt_data_to_user(um + entrysize, data, usersize, datasize)) + xt_data_to_user(um + entrysize, data, usersize, datasize, + XT_ALIGN(datasize))) return -EFAULT; return 0; @@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, if (match->compat_to_user(cm->data, m->data)) return -EFAULT; } else { - if (xt_data_to_user(cm->data, m->data, match->usersize, msize)) + if (xt_data_to_user(cm->data, m->data, match->usersize, msize, + COMPAT_XT_ALIGN(msize))) return -EFAULT; } @@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t, if (target->compat_to_user(cm->data, t->data)) return -EFAULT; } else { - if (xt_data_to_user(cm->data, t->data, target->usersize, tsize)) + if (xt_data_to_user(cm->data, t->data, target->usersize, tsize, + COMPAT_XT_ALIGN(tsize))) return -EFAULT; } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 8876b7da6884..d17769599c10 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -283,12 +283,13 @@ static int xt_obj_to_user(u16 __user *psize, u16 size, &U->u.user.revision, K->u.kernel.TYPE->revision) int xt_data_to_user(void __user *dst, const void *src, - int usersize, int size) + int usersize, int size, int aligned_size) { usersize = usersize ? : size; if (copy_to_user(dst, src, usersize)) return -EFAULT; - if (usersize != size && clear_user(dst + usersize, size - usersize)) + if (usersize != aligned_size && + clear_user(dst + usersize, aligned_size - usersize)) return -EFAULT; return 0; @@ -298,7 +299,9 @@ EXPORT_SYMBOL_GPL(xt_data_to_user); #define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ xt_data_to_user(U->data, K->data, \ K->u.kernel.TYPE->usersize, \ - C_SIZE ? : K->u.kernel.TYPE->TYPE##size) + C_SIZE ? : K->u.kernel.TYPE->TYPE##size, \ + C_SIZE ? 
COMPAT_XT_ALIGN(C_SIZE) : \ + XT_ALIGN(K->u.kernel.TYPE->TYPE##size)) int xt_match_to_user(const struct xt_entry_match *m, struct xt_entry_match __user *u) -- cgit v1.2.3 From 591054469b3eef34bc097c30fae8ededddf8d796 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 15 May 2017 11:17:34 +0100 Subject: netfilter: nf_tables: revisit chain/object refcounting from elements Andreas reports that the following incremental update using our commit protocol doesn't work. # nft -f incremental-update.nft delete element ip filter client_to_any { 10.180.86.22 : goto CIn_1 } delete chain ip filter CIn_1 ... Error: Could not process rule: Device or resource busy The existing code is not well-integrated into the commit phase protocol, since element deletions do not result in refcount decrement from the preparation phase. This results in bogus EBUSY errors like the one above. Two new functions come with this patch: * nft_set_elem_activate() function is used from the abort path, to restore the set element refcounting on objects that occurred from the preparation phase. * nft_set_elem_deactivate() that is called from nft_del_setelem() to decrement set element refcounting on objects from the preparation phase in the commit protocol. The nft_data_uninit() has been renamed to nft_data_release() since this function does not uninitialize any data store in the data register, instead just releases the references to objects. Moreover, a new function nft_data_hold() has been introduced to be used from nft_set_elem_activate(). Reported-by: Andreas Schultz Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 +- net/netfilter/nf_tables_api.c | 82 ++++++++++++++++++++++++++++++++++----- net/netfilter/nft_bitwise.c | 4 +- net/netfilter/nft_cmp.c | 2 +- net/netfilter/nft_immediate.c | 5 ++- net/netfilter/nft_range.c | 4 +- 6 files changed, 81 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 028faec8fc27..8a8bab8d7b15 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -176,7 +176,7 @@ struct nft_data_desc { int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, unsigned int size, struct nft_data_desc *desc, const struct nlattr *nla); -void nft_data_uninit(const struct nft_data *data, enum nft_data_types type); +void nft_data_release(const struct nft_data *data, enum nft_data_types type); int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, enum nft_data_types type, unsigned int len); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5f4a4d48b871..da314be0c048 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3627,9 +3627,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, { struct nft_set_ext *ext = nft_set_elem_ext(set, elem); - nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); + nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE); if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) - nft_data_uninit(nft_set_ext_data(ext), set->dtype); + nft_data_release(nft_set_ext_data(ext), set->dtype); if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) @@ -3638,6 +3638,18 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, } EXPORT_SYMBOL_GPL(nft_set_elem_destroy); +/* Only called from commit path, nft_set_elem_deactivate() 
already deals with + * the refcounting from the preparation phase. + */ +static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem) +{ + struct nft_set_ext *ext = nft_set_elem_ext(set, elem); + + if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) + nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); + kfree(elem); +} + static int nft_setelem_parse_flags(const struct nft_set *set, const struct nlattr *attr, u32 *flags) { @@ -3849,9 +3861,9 @@ err4: kfree(elem.priv); err3: if (nla[NFTA_SET_ELEM_DATA] != NULL) - nft_data_uninit(&data, d2.type); + nft_data_release(&data, d2.type); err2: - nft_data_uninit(&elem.key.val, d1.type); + nft_data_release(&elem.key.val, d1.type); err1: return err; } @@ -3896,6 +3908,53 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, return err; } +/** + * nft_data_hold - hold a nft_data item + * + * @data: struct nft_data to release + * @type: type of data + * + * Hold a nft_data item. NFT_DATA_VALUE types can be silently discarded, + * NFT_DATA_VERDICT bumps the reference to chains in case of NFT_JUMP and + * NFT_GOTO verdicts. This function must be called on active data objects + * from the second phase of the commit protocol. + */ +static void nft_data_hold(const struct nft_data *data, enum nft_data_types type) +{ + if (type == NFT_DATA_VERDICT) { + switch (data->verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + data->verdict.chain->use++; + break; + } + } +} + +static void nft_set_elem_activate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem) +{ + const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); + + if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) + nft_data_hold(nft_set_ext_data(ext), set->dtype); + if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) + (*nft_set_ext_obj(ext))->use++; +} + +static void nft_set_elem_deactivate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem) +{ + const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); + + if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) + nft_data_release(nft_set_ext_data(ext), set->dtype); + if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) + (*nft_set_ext_obj(ext))->use--; +} + static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { @@ -3961,6 +4020,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, kfree(elem.priv); elem.priv = priv; + nft_set_elem_deactivate(ctx->net, set, &elem); + nft_trans_elem(trans) = elem; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; @@ -3970,7 +4031,7 @@ err4: err3: kfree(elem.priv); err2: - nft_data_uninit(&elem.key.val, desc.type); + nft_data_release(&elem.key.val, desc.type); err1: return err; } @@ -4777,8 +4838,8 @@ static void nf_tables_commit_release(struct nft_trans *trans) nft_set_destroy(nft_trans_set(trans)); break; case NFT_MSG_DELSETELEM: - nft_set_elem_destroy(nft_trans_elem_set(trans), - nft_trans_elem(trans).priv, true); + nf_tables_set_elem_destroy(nft_trans_elem_set(trans), + nft_trans_elem(trans).priv); break; case NFT_MSG_DELOBJ: nft_obj_destroy(nft_trans_obj(trans)); @@ -5013,6 +5074,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) case NFT_MSG_DELSETELEM: te = (struct nft_trans_elem *)trans->data; + nft_set_elem_activate(net, te->set, &te->elem); te->set->ops->activate(net, te->set, &te->elem); te->set->ndeact--; @@ -5498,7 +5560,7 @@ int nft_data_init(const struct nft_ctx *ctx, EXPORT_SYMBOL_GPL(nft_data_init); /** - * nft_data_uninit - 
release a nft_data item + * nft_data_release - release a nft_data item * * @data: struct nft_data to release * @type: type of data @@ -5506,7 +5568,7 @@ EXPORT_SYMBOL_GPL(nft_data_init); * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, * all others need to be released by calling this function. */ -void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) +void nft_data_release(const struct nft_data *data, enum nft_data_types type) { if (type < NFT_DATA_VERDICT) return; @@ -5517,7 +5579,7 @@ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) WARN_ON(1); } } -EXPORT_SYMBOL_GPL(nft_data_uninit); +EXPORT_SYMBOL_GPL(nft_data_release); int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, enum nft_data_types type, unsigned int len) diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 96bd4f325b0f..fff8073e2a56 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -99,9 +99,9 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, return 0; err2: - nft_data_uninit(&priv->xor, d2.type); + nft_data_release(&priv->xor, d2.type); err1: - nft_data_uninit(&priv->mask, d1.type); + nft_data_release(&priv->mask, d1.type); return err; } diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 8c9d0fb19118..c2945eb3397c 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -211,7 +211,7 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) return &nft_cmp_ops; err1: - nft_data_uninit(&data, desc.type); + nft_data_release(&data, desc.type); return ERR_PTR(-EINVAL); } diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 728baf88295a..4717d7796927 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -65,7 +65,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx, return 0; err1: - nft_data_uninit(&priv->data, desc.type); + nft_data_release(&priv->data, desc.type); return err; } @@ -73,7 +73,8 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); - return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg)); + + return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); } static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 9edc74eedc10..cedb96c3619f 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -102,9 +102,9 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr priv->len = desc_from.len; return 0; err2: - nft_data_uninit(&priv->data_to, desc_to.type); + nft_data_release(&priv->data_to, desc_to.type); err1: - nft_data_uninit(&priv->data_from, desc_from.type); + nft_data_release(&priv->data_from, desc_from.type); return err; } -- cgit v1.2.3 From c953d63548207a085abcb12a15fefc8a11ffdf0a Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Tue, 16 May 2017 09:30:18 +0800 Subject: ebtables: arpreply: Add the standard target sanity check The info->target comes from userspace and it would be used directly. So we need to add the sanity check to make sure it is a valid standard target, although the ebtables tool has already checked it. Kernel needs to validate anything coming from userspace. If the target is set as an evil value, it would break the ebtables and cause a panic. 
Because the non-standard target is treated as one offset. Now add one helper function ebt_invalid_target, and we would replace the macro INVALID_TARGET later. Signed-off-by: Gao Feng Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_bridge/ebtables.h | 5 +++++ net/bridge/netfilter/ebt_arpreply.c | 3 +++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index a30efb437e6d..e0cbf17af780 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, /* True if the target is not a standard target */ #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) +static inline bool ebt_invalid_target(int target) +{ + return (target < -NUM_STANDARD_TARGETS || target >= 0); +} + #endif diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c index 5929309beaa1..db85230e49c3 100644 --- a/net/bridge/netfilter/ebt_arpreply.c +++ b/net/bridge/netfilter/ebt_arpreply.c @@ -68,6 +68,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par) if (e->ethproto != htons(ETH_P_ARP) || e->invflags & EBT_IPROTO) return -EINVAL; + if (ebt_invalid_target(info->target)) + return -EINVAL; + return 0; } -- cgit v1.2.3 From 9512a16b0e1217bbef73d276a67c28b5fbb46512 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 16 May 2017 15:57:42 -0400 Subject: nfsd: Revert "nfsd: check for oversized NFSv2/v3 arguments" This reverts commit 51f567777799 "nfsd: check for oversized NFSv2/v3 arguments", which breaks support for NFSv3 ACLs. That patch was actually an earlier draft of a fix for the problem that was eventually fixed by e6838a29ecb "nfsd: check for oversized NFSv2/v3 arguments". But somehow I accidentally left this earlier draft in the branch that was part of my 2.12 pull request. Reported-by: Eryu Guan Cc: stable@vger.kernel.org Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs3xdr.c | 23 ++++++----------------- fs/nfsd/nfsxdr.c | 13 +++---------- include/linux/sunrpc/svc.h | 3 ++- 3 files changed, 11 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 12feac6ee2fd..452334694a5d 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); - args->count = ntohl(*p++); - - if (!xdr_argsize_check(rqstp, p)) - return 0; + args->count = ntohl(*p++); len = min(args->count, max_blocksize); /* set up the kvec */ @@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, v++; } args->vlen = v; - return 1; + return xdr_argsize_check(rqstp, p); } int @@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, p = decode_fh(p, &args->fh); if (!p) return 0; - if (!xdr_argsize_check(rqstp, p)) - return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); - return 1; + return xdr_argsize_check(rqstp, p); } int @@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, args->verf = p; p += 2; args->dircount = ~0; args->count = ntohl(*p++); - - if (!xdr_argsize_check(rqstp, p)) - return 0; - args->count = min_t(u32, args->count, PAGE_SIZE); args->buffer = page_address(*(rqstp->rq_next_page++)); - return 1; + return xdr_argsize_check(rqstp, p); } int @@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, args->dircount = ntohl(*p++); args->count = ntohl(*p++); - if (!xdr_argsize_check(rqstp, p)) - return 0; - len = args->count = min(args->count, max_blocksize); while (len > 0) { struct page *p = *(rqstp->rq_next_page++); @@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, args->buffer = page_address(p); len -= PAGE_SIZE; } - return 1; + + return xdr_argsize_check(rqstp, p); } int diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 6a4947a3f4fa..de07ff625777 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, len = args->count = ntohl(*p++); p++; /* totalcount - unused */ - if (!xdr_argsize_check(rqstp, p)) - return 0; - len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); /* set up somewhere to store response. 
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, v++; } args->vlen = v; - return 1; + return xdr_argsize_check(rqstp, p); } int @@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli p = decode_fh(p, &args->fh); if (!p) return 0; - if (!xdr_argsize_check(rqstp, p)) - return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); - return 1; + return xdr_argsize_check(rqstp, p); } int @@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, args->cookie = ntohl(*p++); args->count = ntohl(*p++); args->count = min_t(u32, args->count, PAGE_SIZE); - if (!xdr_argsize_check(rqstp, p)) - return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); - return 1; + return xdr_argsize_check(rqstp, p); } /* diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 94631026f79c..11cef5a7bc87 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) { char *cp = (char *)p; struct kvec *vec = &rqstp->rq_arg.head[0]; - return cp == (char *)vec->iov_base + vec->iov_len; + return cp >= (char*)vec->iov_base + && cp <= (char*)vec->iov_base + vec->iov_len; } static inline int -- cgit v1.2.3 From 93491ced3c87c94b12220dbac0527e1356702179 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 10 May 2017 18:18:29 +0200 Subject: USB: hub: fix SS max number of ports Add define for the maximum number of ports on a SuperSpeed hub as per USB 3.1 spec Table 10-5, and use it when verifying the retrieved hub descriptor. This specifically avoids benign attempts to update the DeviceRemovable mask for non-existing ports (should we get that far). Fixes: dbe79bbe9dcb ("USB 3.0 Hub Changes") Acked-by: Alan Stern Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hub.c | 8 +++++++- include/uapi/linux/usb/ch11.h | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f77a4ebde7d5..b8bb20d7acdb 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1336,7 +1336,13 @@ static int hub_configure(struct usb_hub *hub, if (ret < 0) { message = "can't read hub descriptor"; goto fail; - } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { + } + + maxchild = USB_MAXCHILDREN; + if (hub_is_superspeed(hdev)) + maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS); + + if (hub->descriptor->bNbrPorts > maxchild) { message = "hub has too many ports!"; ret = -ENODEV; goto fail; diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index 361297e96f58..576c704e3fb8 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h @@ -22,6 +22,9 @@ */ #define USB_MAXCHILDREN 31 +/* See USB 3.1 spec Table 10-5 */ +#define USB_SS_MAXPORTS 15 + /* * Hub request types */ -- cgit v1.2.3 From 8b0d3ea555876533b6aa61479335be2c9bdb47e7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 16 May 2017 14:10:33 -0400 Subject: net: dsa: store CPU port pointer in the tree A dsa_switch_tree instance holds a dsa_switch pointer and a port index to identify the switch port to which the CPU is attached. Now that the DSA layer has a dsa_port structure to hold this data, use it to point the switch CPU port. This patch simply substitutes s/dst->cpu_switch/dst->cpu_dp->ds/ and s/dst->cpu_port/dst->cpu_dp->index/. 
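As an editorial sketch (not part of the patch), a reduced user-space model of the substitution; the struct layouts are simplified stand-ins for the real definitions in include/net/dsa.h:

#include <stdio.h>

struct dsa_switch { int index; };
struct dsa_port   { struct dsa_switch *ds; int index; };

/* The tree now records the CPU port as a single pointer ... */
struct dsa_switch_tree { struct dsa_port *cpu_dp; };

int main(void)
{
	struct dsa_switch sw  = { .index = 0 };
	struct dsa_port   cpu = { .ds = &sw, .index = 5 };
	struct dsa_switch_tree dst = { .cpu_dp = &cpu };

	/* ... so dst->cpu_switch reads become dst.cpu_dp->ds and
	 * dst->cpu_port reads become dst.cpu_dp->index, and the two
	 * values can no longer drift out of sync.
	 */
	printf("CPU is switch %d, port %d\n",
	       dst.cpu_dp->ds->index, dst.cpu_dp->index);
	return 0;
}
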
Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 4 ++-- drivers/net/dsa/bcm_sf2.c | 4 ++-- drivers/net/dsa/mv88e6060.c | 2 +- drivers/net/dsa/qca8k.c | 2 +- include/net/dsa.h | 13 ++++++------- net/dsa/dsa2.c | 14 ++++++-------- net/dsa/legacy.c | 10 ++++------ net/dsa/slave.c | 10 +++++----- net/dsa/tag_brcm.c | 2 +- net/dsa/tag_qca.c | 2 +- net/dsa/tag_trailer.c | 2 +- 11 files changed, 30 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index fa0eece21eef..658a12c888a8 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1344,7 +1344,7 @@ EXPORT_SYMBOL(b53_fdb_dump); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; - s8 cpu_port = ds->dst->cpu_port; + s8 cpu_port = ds->dst->cpu_dp->index; u16 pvlan, reg; unsigned int i; @@ -1390,7 +1390,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; struct b53_vlan *vl = &dev->vlans[0]; - s8 cpu_port = ds->dst->cpu_port; + s8 cpu_port = ds->dst->cpu_dp->index; unsigned int i; u16 pvlan, reg, pvid; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 2be963252ca5..215d41c1e71f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -228,7 +228,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - s8 cpu_port = ds->dst[ds->index].cpu_port; + s8 cpu_port = ds->dst->cpu_dp->index; unsigned int i; u32 reg; @@ -832,7 +832,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, { struct net_device *p = ds->dst[ds->index].master_netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - s8 cpu_port = ds->dst[ds->index].cpu_port; + s8 cpu_port = ds->dst->cpu_dp->index; struct ethtool_wolinfo pwol; p->ethtool_ops->get_wol(p, &pwol); diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 5934b7a4c448..dce7fa57eb55 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -176,7 +176,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | (dsa_is_cpu_port(ds, p) ? ds->enabled_port_mask : - BIT(ds->dst->cpu_port))); + BIT(ds->dst->cpu_dp->index))); /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a4fd4ccf7b67..942b9ac7f92a 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -507,7 +507,7 @@ qca8k_setup(struct dsa_switch *ds) pr_warn("regmap initialization failed"); /* Initialize CPU port pad mode (xMII type, delays...) */ - phy_mode = of_get_phy_mode(ds->ports[ds->dst->cpu_port].dn); + phy_mode = of_get_phy_mode(ds->dst->cpu_dp->dn); if (phy_mode < 0) { pr_err("Can't find phy-mode for master device\n"); return phy_mode; diff --git a/include/net/dsa.h b/include/net/dsa.h index 8e24677b1c62..118a8bd2fd9a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -137,10 +137,9 @@ struct dsa_switch_tree { const struct ethtool_ops *master_orig_ethtool_ops; /* - * The switch and port to which the CPU is attached. + * The switch port to which the CPU is attached. 
*/ - struct dsa_switch *cpu_switch; - s8 cpu_port; + struct dsa_port *cpu_dp; /* * Data for the individual switch chips. @@ -251,7 +250,7 @@ struct dsa_switch { static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) { - return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port); + return ds->dst->cpu_dp == &ds->ports[p]; } static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) @@ -279,10 +278,10 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) * Else return the (DSA) port number that connects to the * switch that is one hop closer to the cpu. */ - if (dst->cpu_switch == ds) - return dst->cpu_port; + if (dst->cpu_dp->ds == ds) + return dst->cpu_dp->index; else - return ds->rtable[dst->cpu_switch->index]; + return ds->rtable[dst->cpu_dp->ds->index]; } struct switchdev_trans; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 033b3bfb63dc..2ac62349ba12 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -443,8 +443,8 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst) return err; } - if (dst->cpu_switch) { - err = dsa_cpu_port_ethtool_setup(dst->cpu_switch); + if (dst->cpu_dp) { + err = dsa_cpu_port_ethtool_setup(dst->cpu_dp->ds); if (err) return err; } @@ -484,8 +484,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst) dsa_ds_unapply(dst, ds); } - if (dst->cpu_switch) - dsa_cpu_port_ethtool_restore(dst->cpu_switch); + if (dst->cpu_dp) + dsa_cpu_port_ethtool_restore(dst->cpu_dp->ds); pr_info("DSA: tree %d unapplied\n", dst->tree); dst->applied = false; @@ -518,10 +518,8 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index, if (!dst->master_netdev) dst->master_netdev = ethernet_dev; - if (!dst->cpu_switch) { - dst->cpu_switch = ds; - dst->cpu_port = index; - } + if (!dst->cpu_dp) + dst->cpu_dp = port; tag_protocol = ds->ops->get_tag_protocol(ds); dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index ad345c8b0b06..bb28b011ba5a 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c @@ -115,13 +115,12 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) continue; if (!strcmp(name, "cpu")) { - if (dst->cpu_switch) { + if (dst->cpu_dp) { netdev_err(dst->master_netdev, "multiple cpu ports?!\n"); return -EINVAL; } - dst->cpu_switch = ds; - dst->cpu_port = i; + dst->cpu_dp = &ds->ports[i]; ds->cpu_port_mask |= 1 << i; } else if (!strcmp(name, "dsa")) { ds->dsa_port_mask |= 1 << i; @@ -144,7 +143,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) * tagging protocol to the preferred tagging format of this * switch. 
*/ - if (dst->cpu_switch == ds) { + if (dst->cpu_dp->ds == ds) { enum dsa_tag_protocol tag_protocol; tag_protocol = ops->get_tag_protocol(ds); @@ -624,7 +623,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, dst->pd = pd; dst->master_netdev = dev; - dst->cpu_port = -1; for (i = 0; i < pd->nr_chips; i++) { struct dsa_switch *ds; @@ -735,7 +733,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst) dsa_switch_destroy(ds); } - dsa_cpu_port_ethtool_restore(dst->cpu_switch); + dsa_cpu_port_ethtool_restore(dst->cpu_dp->ds); dev_put(dst->master_netdev); } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 7693182df81e..77324c483d14 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -821,8 +821,8 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev, uint64_t *data) { struct dsa_switch_tree *dst = dev->dsa_ptr; - struct dsa_switch *ds = dst->cpu_switch; - s8 cpu_port = dst->cpu_port; + struct dsa_switch *ds = dst->cpu_dp->ds; + s8 cpu_port = dst->cpu_dp->index; int count = 0; if (dst->master_ethtool_ops.get_sset_count) { @@ -838,7 +838,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev, static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset) { struct dsa_switch_tree *dst = dev->dsa_ptr; - struct dsa_switch *ds = dst->cpu_switch; + struct dsa_switch *ds = dst->cpu_dp->ds; int count = 0; if (dst->master_ethtool_ops.get_sset_count) @@ -854,8 +854,8 @@ static void dsa_cpu_port_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct dsa_switch_tree *dst = dev->dsa_ptr; - struct dsa_switch *ds = dst->cpu_switch; - s8 cpu_port = dst->cpu_port; + struct dsa_switch *ds = dst->cpu_dp->ds; + s8 cpu_port = dst->cpu_dp->index; int len = ETH_GSTRING_LEN; int mcount = 0, count; unsigned int i; diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 2a9b52c5af86..658ddee63dc9 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -101,7 +101,7 @@ static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, int source_port; u8 *brcm_tag; - ds = dst->cpu_switch; + ds = dst->cpu_dp->ds; if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN))) goto out_drop; diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 3ba3f59f7a34..be3b67750ac8 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -99,7 +99,7 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, /* This protocol doesn't support cascading multiple switches so it's * safe to assume the switch is first in the tree */ - ds = dst->cpu_switch; + ds = dst->cpu_dp->ds; if (!ds) goto out_drop; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index aafc2fc74c30..aa05e276ea22 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -67,7 +67,7 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev, u8 *trailer; int source_port; - ds = dst->cpu_switch; + ds = dst->cpu_dp->ds; if (skb_linearize(skb)) goto out_drop; -- cgit v1.2.3 From eb7b721129f1dc67041662da229a28dfc5c3c1dd Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 16 May 2017 22:40:07 +0200 Subject: net: dsa: Sort DSA tagging protocol drivers With more tag protocols being added, regain some order by sorting the entries in various places. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller --- include/net/dsa.h | 8 ++++---- net/dsa/Kconfig | 8 ++++---- net/dsa/Makefile | 6 +++--- net/dsa/dsa.c | 18 +++++++++--------- net/dsa/dsa_priv.h | 18 +++++++++--------- 5 files changed, 29 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 118a8bd2fd9a..ed767beca9c6 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -27,13 +27,13 @@ struct fixed_phy_status; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_BRCM, DSA_TAG_PROTO_DSA, - DSA_TAG_PROTO_TRAILER, DSA_TAG_PROTO_EDSA, - DSA_TAG_PROTO_BRCM, - DSA_TAG_PROTO_QCA, - DSA_TAG_PROTO_MTK, DSA_TAG_PROTO_LAN9303, + DSA_TAG_PROTO_MTK, + DSA_TAG_PROTO_QCA, + DSA_TAG_PROTO_TRAILER, DSA_TAG_LAST, /* MUST BE LAST */ }; diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 81a0868edb1d..297389b2ab35 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -25,16 +25,16 @@ config NET_DSA_TAG_DSA config NET_DSA_TAG_EDSA bool -config NET_DSA_TAG_TRAILER +config NET_DSA_TAG_LAN9303 bool -config NET_DSA_TAG_QCA +config NET_DSA_TAG_MTK bool -config NET_DSA_TAG_MTK +config NET_DSA_TAG_TRAILER bool -config NET_DSA_TAG_LAN9303 +config NET_DSA_TAG_QCA bool endif diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 0b747d75e65a..f8c0251d1f43 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -6,7 +6,7 @@ dsa_core-y += dsa.o slave.o dsa2.o switch.o legacy.o dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o -dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o -dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o -dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o +dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o +dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o +dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 26130ae438da..c0a1307c87dd 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -40,26 +40,26 @@ static const struct dsa_device_ops none_ops = { }; const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { +#ifdef CONFIG_NET_DSA_TAG_BRCM + [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops, +#endif #ifdef CONFIG_NET_DSA_TAG_DSA [DSA_TAG_PROTO_DSA] = &dsa_netdev_ops, #endif #ifdef CONFIG_NET_DSA_TAG_EDSA [DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops, #endif -#ifdef CONFIG_NET_DSA_TAG_TRAILER - [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops, +#ifdef CONFIG_NET_DSA_TAG_LAN9303 + [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops, #endif -#ifdef CONFIG_NET_DSA_TAG_BRCM - [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops, +#ifdef CONFIG_NET_DSA_TAG_MTK + [DSA_TAG_PROTO_MTK] = &mtk_netdev_ops, #endif #ifdef CONFIG_NET_DSA_TAG_QCA [DSA_TAG_PROTO_QCA] = &qca_netdev_ops, #endif -#ifdef CONFIG_NET_DSA_TAG_MTK - [DSA_TAG_PROTO_MTK] = &mtk_netdev_ops, -#endif -#ifdef CONFIG_NET_DSA_TAG_LAN9303 - [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops, +#ifdef CONFIG_NET_DSA_TAG_TRAILER + [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops, #endif [DSA_TAG_PROTO_NONE] = &none_ops, }; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index f4a88e485213..e9003b79cbbc 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -75,25 +75,25 @@ void dsa_slave_unregister_notifier(void); int dsa_switch_register_notifier(struct dsa_switch *ds); void dsa_switch_unregister_notifier(struct dsa_switch *ds); +/* tag_brcm.c */ +extern const struct dsa_device_ops brcm_netdev_ops; + /* tag_dsa.c */ extern const struct dsa_device_ops 
dsa_netdev_ops; /* tag_edsa.c */ extern const struct dsa_device_ops edsa_netdev_ops; -/* tag_trailer.c */ -extern const struct dsa_device_ops trailer_netdev_ops; +/* tag_lan9303.c */ +extern const struct dsa_device_ops lan9303_netdev_ops; -/* tag_brcm.c */ -extern const struct dsa_device_ops brcm_netdev_ops; +/* tag_mtk.c */ +extern const struct dsa_device_ops mtk_netdev_ops; /* tag_qca.c */ extern const struct dsa_device_ops qca_netdev_ops; -/* tag_mtk.c */ -extern const struct dsa_device_ops mtk_netdev_ops; - -/* tag_lan9303.c */ -extern const struct dsa_device_ops lan9303_netdev_ops; +/* tag_trailer.c */ +extern const struct dsa_device_ops trailer_netdev_ops; #endif -- cgit v1.2.3 From 87d83093bfc2f4938ff21524ebb50ecf53c15a64 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 May 2017 11:07:54 +0200 Subject: net: sched: move tc_classify function to cls_api.c Move tc_classify function to cls_api.c where it belongs, rename it to fit the namespace. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 9 +++++++++ include/net/pkt_sched.h | 3 --- net/core/dev.c | 5 +++-- net/sched/cls_api.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ net/sched/sch_api.c | 48 ------------------------------------------------ net/sched/sch_atm.c | 2 +- net/sched/sch_cbq.c | 2 +- net/sched/sch_drr.c | 2 +- net/sched/sch_dsmark.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_hfsc.c | 2 +- net/sched/sch_htb.c | 2 +- net/sched/sch_multiq.c | 2 +- net/sched/sch_prio.c | 2 +- net/sched/sch_qfq.c | 2 +- net/sched/sch_sfb.c | 2 +- net/sched/sch_sfq.c | 2 +- 17 files changed, 72 insertions(+), 65 deletions(-) (limited to 'include') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 269fd78bb0ae..cb745067feb3 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -19,10 +19,19 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #ifdef CONFIG_NET_CLS void tcf_destroy_chain(struct tcf_proto __rcu **fl); +int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res, bool compat_mode); + #else static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl) { } + +static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res, bool compat_mode) +{ + return TC_ACT_UNSPEC; +} #endif static inline unsigned long diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index bec46f63f10c..2579c209ea51 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -113,9 +113,6 @@ static inline void qdisc_run(struct Qdisc *q) __qdisc_run(q); } -int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res, bool compat_mode); - static inline __be16 tc_skb_protocol(const struct sk_buff *skb) { /* We need to take extra care in case the skb came via diff --git a/net/core/dev.c b/net/core/dev.c index fca407b4a6ea..acd594c56f0a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -105,6 +105,7 @@ #include #include #include +#include #include #include #include @@ -3178,7 +3179,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. 
*/ qdisc_bstats_cpu_update(cl->q, skb); - switch (tc_classify(skb, cl, &cl_res, false)) { + switch (tcf_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); @@ -3948,7 +3949,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, skb->tc_at_ingress = 1; qdisc_bstats_cpu_update(cl->q, skb); - switch (tc_classify(skb, cl, &cl_res, false)) { + switch (tcf_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 22f88b35a546..af58bbef6610 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -196,6 +196,54 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl) } EXPORT_SYMBOL(tcf_destroy_chain); +/* Main classifier routine: scans classifier chain attached + * to this qdisc, (optionally) tests for protocol and asks + * specific classifiers. + */ +int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res, bool compat_mode) +{ + __be16 protocol = tc_skb_protocol(skb); +#ifdef CONFIG_NET_CLS_ACT + const int max_reclassify_loop = 4; + const struct tcf_proto *old_tp = tp; + int limit = 0; + +reclassify: +#endif + for (; tp; tp = rcu_dereference_bh(tp->next)) { + int err; + + if (tp->protocol != protocol && + tp->protocol != htons(ETH_P_ALL)) + continue; + + err = tp->classify(skb, tp, res); +#ifdef CONFIG_NET_CLS_ACT + if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) + goto reset; +#endif + if (err >= 0) + return err; + } + + return TC_ACT_UNSPEC; /* signal: continue lookup */ +#ifdef CONFIG_NET_CLS_ACT +reset: + if (unlikely(limit++ >= max_reclassify_loop)) { + net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n", + tp->q->ops->id, tp->prio & 0xffff, + ntohs(tp->protocol)); + return TC_ACT_SHOT; + } + + tp = old_tp; + protocol = tc_skb_protocol(skb); + goto reclassify; +#endif +} +EXPORT_SYMBOL(tcf_classify); + /* Add/change/delete/get a filter node */ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index e88342fde1bc..a3bcd972d940 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1878,54 +1878,6 @@ done: return skb->len; } -/* Main classifier routine: scans classifier chain attached - * to this qdisc, (optionally) tests for protocol and asks - * specific classifiers. 
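
The relocated classifier loop is subtle enough to deserve a standalone illustration: filters are scanned in chain order, a TC_ACT_RECLASSIFY verdict restarts the scan from the saved head, and max_reclassify_loop bounds the number of restarts. The following is a plain userspace C model of that control flow; the verdict codes and types are simplified stand-ins, not the kernel's:

#include <stdio.h>

enum {
        ACT_UNSPEC     = -1,    /* no filter matched: continue lookup */
        ACT_OK         =  0,
        ACT_RECLASSIFY =  1,
        ACT_SHOT       =  2,
};

struct filter {
        struct filter *next;
        int (*classify)(int pkt);       /* >= 0: verdict, < 0: no match */
};

static int classify(const struct filter *head, int pkt)
{
        const int max_reclassify_loop = 4;
        const struct filter *tp = head;
        int limit = 0;

reclassify:
        for (; tp; tp = tp->next) {
                int err = tp->classify(pkt);

                if (err == ACT_RECLASSIFY) {
                        /* bound the restarts, as the kernel loop does */
                        if (limit++ >= max_reclassify_loop)
                                return ACT_SHOT;
                        tp = head;      /* restart from the chain head */
                        goto reclassify;
                }
                if (err >= 0)
                        return err;     /* first positive verdict wins */
        }
        return ACT_UNSPEC;
}

static int always_reclassify(int pkt)
{
        (void)pkt;
        return ACT_RECLASSIFY;
}

int main(void)
{
        struct filter f = { .next = NULL, .classify = always_reclassify };

        printf("verdict %d (ACT_SHOT is %d)\n", classify(&f, 0), ACT_SHOT);
        return 0;
}

Running it shows a chain that keeps asking for reclassification being cut off with the SHOT verdict once the restart budget is exhausted, which is the loop-breaking condition the net_notice_ratelimited() message above reports.
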
- */ -int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res, bool compat_mode) -{ - __be16 protocol = tc_skb_protocol(skb); -#ifdef CONFIG_NET_CLS_ACT - const int max_reclassify_loop = 4; - const struct tcf_proto *old_tp = tp; - int limit = 0; - -reclassify: -#endif - for (; tp; tp = rcu_dereference_bh(tp->next)) { - int err; - - if (tp->protocol != protocol && - tp->protocol != htons(ETH_P_ALL)) - continue; - - err = tp->classify(skb, tp, res); -#ifdef CONFIG_NET_CLS_ACT - if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) - goto reset; -#endif - if (err >= 0) - return err; - } - - return TC_ACT_UNSPEC; /* signal: continue lookup */ -#ifdef CONFIG_NET_CLS_ACT -reset: - if (unlikely(limit++ >= max_reclassify_loop)) { - net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n", - tp->q->ops->id, tp->prio & 0xffff, - ntohs(tp->protocol)); - return TC_ACT_SHOT; - } - - tp = old_tp; - protocol = tc_skb_protocol(skb); - goto reclassify; -#endif -} -EXPORT_SYMBOL(tc_classify); - #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 40cbceed4de8..89d32fad9f89 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -377,7 +377,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, list_for_each_entry(flow, &p->flows, list) { fl = rcu_dereference_bh(flow->filter_list); if (fl) { - result = tc_classify(skb, fl, &res, true); + result = tcf_classify(skb, fl, &res, true); if (result < 0) continue; flow = (struct atm_flow_data *)res.class; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 7415859fd4c3..c543ea3e3043 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -233,7 +233,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) /* * Step 2+n. Apply classifier. 
*/ - result = tc_classify(skb, fl, &res, true); + result = tcf_classify(skb, fl, &res, true); if (!fl || result < 0) goto fallback; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 58a8c32eab23..446d79bb25d9 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -333,7 +333,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; fl = rcu_dereference_bh(q->filter_list); - result = tc_classify(skb, fl, &res, false); + result = tcf_classify(skb, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 1c0f877f673a..7bc638d2e67f 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -234,7 +234,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, else { struct tcf_result res; struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); - int result = tc_classify(skb, fl, &res, false); + int result = tcf_classify(skb, fl, &res, false); pr_debug("result %d class 0x%04x\n", result, res.classid); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 9201abce928c..42ba81ad327c 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -96,7 +96,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, return fq_codel_hash(q, skb) + 1; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, filter, &res, false); + result = tcf_classify(skb, filter, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 5cb82f6c1b06..b0dcab199205 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1142,7 +1142,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; head = &q->root; tcf = rcu_dereference_bh(q->root.filter_list); - while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) { + while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 570ef3b0c09b..640f5f336195 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -231,7 +231,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) { + while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 43a3a10b3c81..25bb9ffc2df1 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -46,7 +46,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - err = tc_classify(skb, fl, &res, false); + err = tcf_classify(skb, fl, &res, false); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 92c2e6d448d7..7997363f7e0d 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -42,7 +42,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { fl = rcu_dereference_bh(q->filter_list); - err = tc_classify(skb, fl, &res, false); + err = tcf_classify(skb, 
fl, &res, false); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 041eba3006cc..73c7ac37b570 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -720,7 +720,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; fl = rcu_dereference_bh(q->filter_list); - result = tc_classify(skb, fl, &res, false); + result = tcf_classify(skb, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 0f777273ba29..b287880829e2 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -259,7 +259,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, struct tcf_result res; int result; - result = tc_classify(skb, fl, &res, false); + result = tcf_classify(skb, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 332d94be6e1c..53a641f2ccb5 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -180,7 +180,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, return sfq_hash(q, skb) + 1; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, fl, &res, false); + result = tcf_classify(skb, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { -- cgit v1.2.3 From 6529eaba33f0465fc6d228e1d05b1745f7d0e8c9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 May 2017 11:07:55 +0200 Subject: net: sched: introduce tcf block infrastructure Currently, the filter chains are directly put into the private structures of qdiscs. In order to be able to have multiple chains per qdisc and to allow filter chain sharing among qdiscs, there is a need for a common object that would hold the chains. This introduces such an object and calls it "tcf_block". Helpers to get and put the blocks are provided to be called from individual qdisc code. Also, the original filter_list pointers are left in qdisc privs to allow the entry into tcf_block processing without any added overhead of possible multiple pointer dereference on the fast path. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S.
Miller --- include/net/pkt_cls.h | 13 ++++++++-- include/net/sch_generic.h | 7 +++++- net/sched/cls_api.c | 48 +++++++++++++++++++++++++++++-------- net/sched/sch_api.c | 2 +- net/sched/sch_atm.c | 27 ++++++++++++++------- net/sched/sch_cbq.c | 19 ++++++++++----- net/sched/sch_drr.c | 13 ++++++---- net/sched/sch_dsmark.c | 17 ++++++++----- net/sched/sch_fq_codel.c | 15 ++++++++---- net/sched/sch_hfsc.c | 19 ++++++++++----- net/sched/sch_htb.c | 26 +++++++++++++------- net/sched/sch_ingress.c | 61 ++++++++++++++++++++++++++++++++++------------- net/sched/sch_multiq.c | 14 +++++++---- net/sched/sch_prio.c | 17 +++++++++---- net/sched/sch_qfq.c | 14 +++++++---- net/sched/sch_sfb.c | 15 ++++++++---- net/sched/sch_sfq.c | 15 ++++++++---- 17 files changed, 243 insertions(+), 99 deletions(-) (limited to 'include') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index cb745067feb3..e56e7157c280 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -18,12 +18,21 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #ifdef CONFIG_NET_CLS -void tcf_destroy_chain(struct tcf_proto __rcu **fl); +int tcf_block_get(struct tcf_block **p_block, + struct tcf_proto __rcu **p_filter_chain); +void tcf_block_put(struct tcf_block *block); int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode); #else -static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl) +static inline +int tcf_block_get(struct tcf_block **p_block, + struct tcf_proto __rcu **p_filter_chain) +{ + return 0; +} + +static inline void tcf_block_put(struct tcf_block *block) { } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 22e52093bfda..98cf2f23602d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -153,7 +153,7 @@ struct Qdisc_class_ops { void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ - struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); + struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 classid); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); @@ -236,6 +236,7 @@ struct tcf_proto { struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; + struct tcf_block *block; struct rcu_head rcu; }; @@ -247,6 +248,10 @@ struct qdisc_skb_cb { unsigned char data[QDISC_CB_PRIV_LEN]; }; +struct tcf_block { + struct tcf_proto __rcu **p_filter_chain; +}; + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index af58bbef6610..d30116f77156 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -129,7 +129,8 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) } static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, - u32 prio, u32 parent, struct Qdisc *q) + u32 prio, u32 parent, struct Qdisc *q, + struct tcf_block *block) { struct tcf_proto *tp; int err; @@ -165,6 +166,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, tp->prio = prio; tp->classid = parent; tp->q = q; + tp->block = block; err = tp->ops->init(tp); if (err) { @@ -185,7 +187,7 @@ static void tcf_proto_destroy(struct tcf_proto *tp) kfree_rcu(tp, rcu); } -void tcf_destroy_chain(struct tcf_proto __rcu **fl) +static void tcf_destroy_chain(struct tcf_proto __rcu **fl) { struct tcf_proto *tp; @@ -194,7 +196,28 @@ void 
tcf_destroy_chain(struct tcf_proto __rcu **fl) tcf_proto_destroy(tp); } } -EXPORT_SYMBOL(tcf_destroy_chain); + +int tcf_block_get(struct tcf_block **p_block, + struct tcf_proto __rcu **p_filter_chain) +{ + struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL); + + if (!block) + return -ENOMEM; + block->p_filter_chain = p_filter_chain; + *p_block = block; + return 0; +} +EXPORT_SYMBOL(tcf_block_get); + +void tcf_block_put(struct tcf_block *block) +{ + if (!block) + return; + tcf_destroy_chain(block->p_filter_chain); + kfree(block); +} +EXPORT_SYMBOL(tcf_block_put); /* Main classifier routine: scans classifier chain attached * to this qdisc, (optionally) tests for protocol and asks @@ -260,6 +283,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, struct Qdisc *q; struct tcf_proto __rcu **back; struct tcf_proto __rcu **chain; + struct tcf_block *block; struct tcf_proto *next; struct tcf_proto *tp; const struct Qdisc_class_ops *cops; @@ -328,7 +352,7 @@ replay: if (!cops) return -EINVAL; - if (cops->tcf_chain == NULL) + if (!cops->tcf_block) return -EOPNOTSUPP; /* Do we search for filter, attached to class? */ @@ -339,11 +363,13 @@ replay: } /* And the last stroke */ - chain = cops->tcf_chain(q, cl); - if (chain == NULL) { + block = cops->tcf_block(q, cl); + if (!block) { err = -EINVAL; goto errout; } + chain = block->p_filter_chain; + if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) { tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER); tcf_destroy_chain(chain); @@ -387,7 +413,7 @@ replay: nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); tp = tcf_proto_create(nla_data(tca[TCA_KIND]), - protocol, nprio, parent, q); + protocol, nprio, parent, q, block); if (IS_ERR(tp)) { err = PTR_ERR(tp); goto errout; @@ -556,6 +582,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) int s_t; struct net_device *dev; struct Qdisc *q; + struct tcf_block *block; struct tcf_proto *tp, __rcu **chain; struct tcmsg *tcm = nlmsg_data(cb->nlh); unsigned long cl = 0; @@ -577,16 +604,17 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) cops = q->ops->cl_ops; if (!cops) goto errout; - if (cops->tcf_chain == NULL) + if (!cops->tcf_block) goto errout; if (TC_H_MIN(tcm->tcm_parent)) { cl = cops->get(q, tcm->tcm_parent); if (cl == 0) goto errout; } - chain = cops->tcf_chain(q, cl); - if (chain == NULL) + block = cops->tcf_block(q, cl); + if (!block) goto errout; + chain = block->p_filter_chain; s_t = cb->args[0]; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a3bcd972d940..5d95401bbc02 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -163,7 +163,7 @@ int register_qdisc(struct Qdisc_ops *qops) if (!(cops->get && cops->put && cops->walk && cops->leaf)) goto out_einval; - if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf)) + if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf)) goto out_einval; } diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 89d32fad9f89..f435546c3864 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -43,6 +43,7 @@ struct atm_flow_data { struct Qdisc *q; /* FIFO, TBF, etc. 
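
Since every qdisc converted in the remainder of this patch repeats the same two calls, a compact userspace model of the ownership contract may help: tcf_block_get() allocates the block and records where the qdisc keeps its fast-path filter pointer, tcf_block_put() tears it down, and the qdisc private area ends up holding both the pointer the hot path reads and the block that manages it. Types and names below are simplified stand-ins, not the kernel implementation:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct tcf_proto;                       /* opaque in this sketch */

struct tcf_block {
        struct tcf_proto **p_filter_chain;  /* where the fast path looks */
};

static int tcf_block_get(struct tcf_block **p_block,
                         struct tcf_proto **p_filter_chain)
{
        struct tcf_block *block = calloc(1, sizeof(*block));

        if (!block)
                return -ENOMEM;
        block->p_filter_chain = p_filter_chain;
        *p_block = block;
        return 0;
}

static void tcf_block_put(struct tcf_block *block)
{
        if (!block)
                return;
        /* the kernel version also destroys the chain it manages */
        free(block);
}

/* A qdisc private area keeps both the pointer the hot path reads and
 * the block object that owns the machinery behind it. */
struct foo_sched_data {
        struct tcf_proto *filter_list;
        struct tcf_block *block;
};

static int foo_init(struct foo_sched_data *q)
{
        return tcf_block_get(&q->block, &q->filter_list);
}

static void foo_destroy(struct foo_sched_data *q)
{
        tcf_block_put(q->block);
}

int main(void)
{
        struct foo_sched_data q = { 0 };

        if (foo_init(&q) < 0)
                return 1;
        printf("block %p tracks filter_list at %p\n",
               (void *)q.block, (void *)q.block->p_filter_chain);
        foo_destroy(&q);
        return 0;
}

This is why the conversions below touch only init/destroy (and the class-creation paths): the fast-path filter_list dereference is untouched, exactly as the commit message promises.
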
*/ struct tcf_proto __rcu *filter_list; + struct tcf_block *block; struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); /* chaining */ @@ -143,7 +144,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl) list_del_init(&flow->list); pr_debug("atm_tc_put: qdisc %p\n", flow->q); qdisc_destroy(flow->q); - tcf_destroy_chain(&flow->filter_list); + tcf_block_put(flow->block); if (flow->sock) { pr_debug("atm_tc_put: f_count %ld\n", file_count(flow->sock->file)); @@ -274,7 +275,13 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, error = -ENOBUFS; goto err_out; } - RCU_INIT_POINTER(flow->filter_list, NULL); + + error = tcf_block_get(&flow->block, &flow->filter_list); + if (error) { + kfree(flow); + goto err_out; + } + flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (!flow->q) flow->q = &noop_qdisc; @@ -346,14 +353,13 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); - return flow ? &flow->filter_list : &p->link.filter_list; + return flow ? flow->block : p->link.block; } /* --------------------------- Qdisc operations ---------------------------- */ @@ -524,6 +530,7 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch) static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) { struct atm_qdisc_data *p = qdisc_priv(sch); + int err; pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); INIT_LIST_HEAD(&p->flows); @@ -534,7 +541,11 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); - RCU_INIT_POINTER(p->link.filter_list, NULL); + + err = tcf_block_get(&p->link.block, &p->link.filter_list); + if (err) + return err; + p->link.vcc = NULL; p->link.sock = NULL; p->link.classid = sch->handle; @@ -561,7 +572,7 @@ static void atm_tc_destroy(struct Qdisc *sch) pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) - tcf_destroy_chain(&flow->filter_list); + tcf_block_put(flow->block); list_for_each_entry_safe(flow, tmp, &p->flows, list) { if (flow->ref > 1) @@ -646,7 +657,7 @@ static const struct Qdisc_class_ops atm_class_ops = { .change = atm_tc_change, .delete = atm_tc_delete, .walk = atm_tc_walk, - .tcf_chain = atm_tc_find_tcf, + .tcf_block = atm_tc_tcf_block, .bind_tcf = atm_tc_bind_filter, .unbind_tcf = atm_tc_put, .dump = atm_tc_dump_class, diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index c543ea3e3043..8dd6d0aca678 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -127,6 +127,7 @@ struct cbq_class { struct tc_cbq_xstats xstats; struct tcf_proto __rcu *filter_list; + struct tcf_block *block; int refcnt; int filters; @@ -1405,7 +1406,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) WARN_ON(cl->filters); - tcf_destroy_chain(&cl->filter_list); + tcf_block_put(cl->block); qdisc_destroy(cl->q); qdisc_put_rtab(cl->R_tab); gen_kill_estimator(&cl->rate_est); @@ -1430,7 +1431,7 @@ static void cbq_destroy(struct Qdisc *sch) */ for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry(cl, 
&q->clhash.hash[h], common.hnode) - tcf_destroy_chain(&cl->filter_list); + tcf_block_put(cl->block); } for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], @@ -1585,12 +1586,19 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (cl == NULL) goto failure; + err = tcf_block_get(&cl->block, &cl->filter_list); + if (err) { + kfree(cl); + return err; + } + if (tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, qdisc_root_sleeping_running(sch), tca[TCA_RATE]); if (err) { + tcf_block_put(cl->block); kfree(cl); goto failure; } @@ -1688,8 +1696,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) return 0; } -static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch, - unsigned long arg) +static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg) { struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl = (struct cbq_class *)arg; @@ -1697,7 +1704,7 @@ static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch, if (cl == NULL) cl = &q->link; - return &cl->filter_list; + return cl->block; } static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent, @@ -1756,7 +1763,7 @@ static const struct Qdisc_class_ops cbq_class_ops = { .change = cbq_change_class, .delete = cbq_delete, .walk = cbq_walk, - .tcf_chain = cbq_find_tcf, + .tcf_block = cbq_tcf_block, .bind_tcf = cbq_bind_filter, .unbind_tcf = cbq_unbind_filter, .dump = cbq_dump_class, diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 446d79bb25d9..5db2a2843c66 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -36,6 +36,7 @@ struct drr_class { struct drr_sched { struct list_head active; struct tcf_proto __rcu *filter_list; + struct tcf_block *block; struct Qdisc_class_hash clhash; }; @@ -190,15 +191,14 @@ static void drr_put_class(struct Qdisc *sch, unsigned long arg) drr_destroy_class(sch, cl); } -static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl) { struct drr_sched *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent, @@ -431,6 +431,9 @@ static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt) struct drr_sched *q = qdisc_priv(sch); int err; + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; err = qdisc_class_hash_init(&q->clhash); if (err < 0) return err; @@ -462,7 +465,7 @@ static void drr_destroy_qdisc(struct Qdisc *sch) struct hlist_node *next; unsigned int i; - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], @@ -477,7 +480,7 @@ static const struct Qdisc_class_ops drr_class_ops = { .delete = drr_delete_class, .get = drr_get_class, .put = drr_put_class, - .tcf_chain = drr_tcf_chain, + .tcf_block = drr_tcf_block, .bind_tcf = drr_bind_tcf, .unbind_tcf = drr_unbind_tcf, .graft = drr_graft_class, diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 7bc638d2e67f..ba45102cff94 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -44,6 +44,7 @@ struct mask_value { struct dsmark_qdisc_data { struct Qdisc *q; struct tcf_proto __rcu *filter_list; + struct tcf_block *block; struct mask_value *mv; u16 indices; u8 set_tc_index; @@ -183,11 +184,11 @@ ignore: } } -static 
inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl) { struct dsmark_qdisc_data *p = qdisc_priv(sch); - return &p->filter_list; + + return p->block; } /* --------------------------- Qdisc operations ---------------------------- */ @@ -332,7 +333,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) { struct dsmark_qdisc_data *p = qdisc_priv(sch); struct nlattr *tb[TCA_DSMARK_MAX + 1]; - int err = -EINVAL; + int err; u32 default_index = NO_DEFAULT_INDEX; u16 indices; int i; @@ -342,6 +343,10 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) if (!opt) goto errout; + err = tcf_block_get(&p->block, &p->filter_list); + if (err) + return err; + err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL); if (err < 0) goto errout; @@ -400,7 +405,7 @@ static void dsmark_destroy(struct Qdisc *sch) pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); - tcf_destroy_chain(&p->filter_list); + tcf_block_put(p->block); qdisc_destroy(p->q); if (p->mv != p->embedded) kfree(p->mv); @@ -468,7 +473,7 @@ static const struct Qdisc_class_ops dsmark_class_ops = { .change = dsmark_change, .delete = dsmark_delete, .walk = dsmark_walk, - .tcf_chain = dsmark_find_tcf, + .tcf_block = dsmark_tcf_block, .bind_tcf = dsmark_bind_filter, .unbind_tcf = dsmark_put, .dump = dsmark_dump_class, diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 42ba81ad327c..f201e73947fb 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -55,6 +55,7 @@ struct fq_codel_flow { struct fq_codel_sched_data { struct tcf_proto __rcu *filter_list; /* optional external classifier */ + struct tcf_block *block; struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ u32 *backlogs; /* backlog table [flows_cnt] */ u32 flows_cnt; /* number of flows */ @@ -450,7 +451,7 @@ static void fq_codel_destroy(struct Qdisc *sch) { struct fq_codel_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); kvfree(q->backlogs); kvfree(q->flows); } @@ -459,6 +460,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) { struct fq_codel_sched_data *q = qdisc_priv(sch); int i; + int err; sch->limit = 10*1024; q->flows_cnt = 1024; @@ -478,6 +480,10 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) return err; } + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; + if (!q->flows) { q->flows = kvzalloc(q->flows_cnt * sizeof(struct fq_codel_flow), GFP_KERNEL); @@ -589,14 +595,13 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl) { } -static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl) { struct fq_codel_sched_data *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl, @@ -679,7 +684,7 @@ static const struct Qdisc_class_ops fq_codel_class_ops = { .leaf = fq_codel_leaf, .get = fq_codel_get, .put = fq_codel_put, - .tcf_chain = fq_codel_find_tcf, + .tcf_block = fq_codel_tcf_block, .bind_tcf = fq_codel_bind, .unbind_tcf = fq_codel_put, .dump = fq_codel_dump_class, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b0dcab199205..a324f84b1ccd 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -116,6 +116,7 @@ struct hfsc_class { struct 
gnet_stats_queue qstats; struct net_rate_estimator __rcu *rate_est; struct tcf_proto __rcu *filter_list; /* filter list */ + struct tcf_block *block; unsigned int filter_cnt; /* filter count */ unsigned int level; /* class level in hierarchy */ @@ -1040,12 +1041,19 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl == NULL) return -ENOBUFS; + err = tcf_block_get(&cl->block, &cl->filter_list); + if (err) { + kfree(cl); + return err; + } + if (tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, qdisc_root_sleeping_running(sch), tca[TCA_RATE]); if (err) { + tcf_block_put(cl->block); kfree(cl); return err; } @@ -1091,7 +1099,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl) { struct hfsc_sched *q = qdisc_priv(sch); - tcf_destroy_chain(&cl->filter_list); + tcf_block_put(cl->block); qdisc_destroy(cl->qdisc); gen_kill_estimator(&cl->rate_est); if (cl != &q->root) @@ -1261,8 +1269,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg) cl->filter_cnt--; } -static struct tcf_proto __rcu ** -hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) +static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg) { struct hfsc_sched *q = qdisc_priv(sch); struct hfsc_class *cl = (struct hfsc_class *)arg; @@ -1270,7 +1277,7 @@ hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) if (cl == NULL) cl = &q->root; - return &cl->filter_list; + return cl->block; } static int @@ -1515,7 +1522,7 @@ hfsc_destroy_qdisc(struct Qdisc *sch) for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) - tcf_destroy_chain(&cl->filter_list); + tcf_block_put(cl->block); } for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], @@ -1662,7 +1669,7 @@ static const struct Qdisc_class_ops hfsc_class_ops = { .put = hfsc_put_class, .bind_tcf = hfsc_bind_tcf, .unbind_tcf = hfsc_unbind_tcf, - .tcf_chain = hfsc_tcf_chain, + .tcf_block = hfsc_tcf_block, .dump = hfsc_dump_class, .dump_stats = hfsc_dump_class_stats, .walk = hfsc_walk diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 640f5f336195..195bbca9eb0b 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -105,6 +105,7 @@ struct htb_class { int quantum; /* but stored for parent-to-leaf return */ struct tcf_proto __rcu *filter_list; /* class attached filters */ + struct tcf_block *block; int filter_cnt; int refcnt; /* usage count of this class */ @@ -156,6 +157,7 @@ struct htb_sched { /* filters for qdisc itself */ struct tcf_proto __rcu *filter_list; + struct tcf_block *block; #define HTB_WARN_TOOMANYEVENTS 0x1 unsigned int warned; /* only one warning */ @@ -1017,6 +1019,10 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; + err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL); if (err < 0) return err; @@ -1230,7 +1236,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) qdisc_destroy(cl->un.leaf.q); } gen_kill_estimator(&cl->rate_est); - tcf_destroy_chain(&cl->filter_list); + tcf_block_put(cl->block); kfree(cl); } @@ -1248,11 +1254,11 @@ static void htb_destroy(struct Qdisc *sch) * because filter need its target class alive to be able to call * unbind_filter on it (without Oops). 
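
One detail worth calling out in the class-creation hunks (cbq above, hfsc here, htb below all follow it): resources are unwound in reverse order of acquisition, so when the rate-estimator setup fails after tcf_block_get() has already succeeded, the block is put before the class is freed. A self-contained C sketch of that unwind pattern, with stub allocators standing in for the kernel helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct block { int unused; };

static int block_get(struct block **p)
{
        *p = calloc(1, sizeof(**p));
        return *p ? 0 : -ENOMEM;
}

static void block_put(struct block *b)
{
        free(b);
}

/* stand-in for gen_new_estimator(); 'fail' forces the error path */
static int estimator_new(int fail)
{
        return fail ? -ENOBUFS : 0;
}

struct myclass {
        struct block *block;
};

static struct myclass *class_create(int fail_estimator, int *errp)
{
        struct myclass *cl = calloc(1, sizeof(*cl));
        int err = -ENOMEM;

        if (!cl)
                goto err_out;

        err = block_get(&cl->block);
        if (err)
                goto err_free_cl;

        err = estimator_new(fail_estimator);
        if (err)
                goto err_put_block;     /* reverse order: block, then cl */

        *errp = 0;
        return cl;

err_put_block:
        block_put(cl->block);
err_free_cl:
        free(cl);
err_out:
        *errp = err;
        return NULL;
}

int main(void)
{
        int err;
        struct myclass *cl = class_create(1, &err);

        printf("forced failure: err=%d cl=%p\n", err, (void *)cl);

        cl = class_create(0, &err);
        if (cl) {
                block_put(cl->block);   /* teardown mirrors creation */
                free(cl);
        }
        return 0;
}
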
*/ - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) - tcf_destroy_chain(&cl->filter_list); + tcf_block_put(cl->block); } for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], @@ -1396,6 +1402,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, if (!cl) goto failure; + err = tcf_block_get(&cl->block, &cl->filter_list); + if (err) { + kfree(cl); + goto failure; + } if (htb_rate_est || tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, @@ -1403,6 +1414,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, qdisc_root_sleeping_running(sch), tca[TCA_RATE] ? : &est.nla); if (err) { + tcf_block_put(cl->block); kfree(cl); goto failure; } @@ -1521,14 +1533,12 @@ failure: return err; } -static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch, - unsigned long arg) +static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg) { struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = (struct htb_class *)arg; - struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list; - return fl; + return cl ? cl->block : q->block; } static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, @@ -1591,7 +1601,7 @@ static const struct Qdisc_class_ops htb_class_ops = { .change = htb_change_class, .delete = htb_delete, .walk = htb_walk, - .tcf_chain = htb_find_tcf, + .tcf_block = htb_tcf_block, .bind_tcf = htb_bind_filter, .unbind_tcf = htb_unbind_filter, .dump = htb_dump_class, diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 3bab5f66c392..d8a9bebcab90 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -18,6 +18,10 @@ #include #include +struct ingress_sched_data { + struct tcf_block *block; +}; + static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; @@ -47,16 +51,23 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { } -static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl) { - struct net_device *dev = qdisc_dev(sch); + struct ingress_sched_data *q = qdisc_priv(sch); - return &dev->ingress_cl_list; + return q->block; } static int ingress_init(struct Qdisc *sch, struct nlattr *opt) { + struct ingress_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + int err; + + err = tcf_block_get(&q->block, &dev->ingress_cl_list); + if (err) + return err; + net_inc_ingress_queue(); sch->flags |= TCQ_F_CPUSTATS; @@ -65,9 +76,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt) static void ingress_destroy(struct Qdisc *sch) { - struct net_device *dev = qdisc_dev(sch); + struct ingress_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&dev->ingress_cl_list); + tcf_block_put(q->block); net_dec_ingress_queue(); } @@ -91,7 +102,7 @@ static const struct Qdisc_class_ops ingress_class_ops = { .get = ingress_get, .put = ingress_put, .walk = ingress_walk, - .tcf_chain = ingress_find_tcf, + .tcf_block = ingress_tcf_block, .tcf_cl_offload = ingress_cl_offload, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_put, @@ -100,12 +111,18 @@ static const struct Qdisc_class_ops ingress_class_ops = { static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { .cl_ops = &ingress_class_ops, .id = "ingress", + .priv_size = sizeof(struct 
ingress_sched_data), .init = ingress_init, .destroy = ingress_destroy, .dump = ingress_dump, .owner = THIS_MODULE, }; +struct clsact_sched_data { + struct tcf_block *ingress_block; + struct tcf_block *egress_block; +}; + static unsigned long clsact_get(struct Qdisc *sch, u32 classid) { switch (TC_H_MIN(classid)) { @@ -128,16 +145,15 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch, return clsact_get(sch, classid); } -static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl) { - struct net_device *dev = qdisc_dev(sch); + struct clsact_sched_data *q = qdisc_priv(sch); switch (cl) { case TC_H_MIN(TC_H_MIN_INGRESS): - return &dev->ingress_cl_list; + return q->ingress_block; case TC_H_MIN(TC_H_MIN_EGRESS): - return &dev->egress_cl_list; + return q->egress_block; default: return NULL; } @@ -145,6 +161,18 @@ static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch, static int clsact_init(struct Qdisc *sch, struct nlattr *opt) { + struct clsact_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + int err; + + err = tcf_block_get(&q->ingress_block, &dev->ingress_cl_list); + if (err) + return err; + + err = tcf_block_get(&q->egress_block, &dev->egress_cl_list); + if (err) + return err; + net_inc_ingress_queue(); net_inc_egress_queue(); @@ -155,10 +183,10 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt) static void clsact_destroy(struct Qdisc *sch) { - struct net_device *dev = qdisc_dev(sch); + struct clsact_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&dev->ingress_cl_list); - tcf_destroy_chain(&dev->egress_cl_list); + tcf_block_put(q->egress_block); + tcf_block_put(q->ingress_block); net_dec_ingress_queue(); net_dec_egress_queue(); @@ -169,7 +197,7 @@ static const struct Qdisc_class_ops clsact_class_ops = { .get = clsact_get, .put = ingress_put, .walk = ingress_walk, - .tcf_chain = clsact_find_tcf, + .tcf_block = clsact_tcf_block, .tcf_cl_offload = clsact_cl_offload, .bind_tcf = clsact_bind_filter, .unbind_tcf = ingress_put, @@ -178,6 +206,7 @@ static const struct Qdisc_class_ops clsact_class_ops = { static struct Qdisc_ops clsact_qdisc_ops __read_mostly = { .cl_ops = &clsact_class_ops, .id = "clsact", + .priv_size = sizeof(struct clsact_sched_data), .init = clsact_init, .destroy = clsact_destroy, .dump = ingress_dump, diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 25bb9ffc2df1..604767482ad0 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -32,6 +32,7 @@ struct multiq_sched_data { u16 max_bands; u16 curband; struct tcf_proto __rcu *filter_list; + struct tcf_block *block; struct Qdisc **queues; }; @@ -170,7 +171,7 @@ multiq_destroy(struct Qdisc *sch) int band; struct multiq_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); for (band = 0; band < q->bands; band++) qdisc_destroy(q->queues[band]); @@ -243,6 +244,10 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; + q->max_bands = qdisc_dev(sch)->num_tx_queues; q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL); @@ -367,14 +372,13 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, 
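
clsact generalizes the single-block ingress case just shown: the qdisc private data owns one block per direction and dispatches on the class minor. A userspace model of that dispatch follows; the minor values are stand-ins for TC_H_MIN_INGRESS/TC_H_MIN_EGRESS and everything else is trimmed:

#include <stdio.h>

/* stand-ins for TC_H_MIN_INGRESS / TC_H_MIN_EGRESS */
enum { MIN_INGRESS = 0xFFF2, MIN_EGRESS = 0xFFF3 };

struct tcf_block {
        const char *name;
};

struct clsact_sched_data {
        struct tcf_block ingress_block;
        struct tcf_block egress_block;
};

static struct tcf_block *clsact_block(struct clsact_sched_data *q,
                                      unsigned long cl)
{
        switch (cl) {
        case MIN_INGRESS:
                return &q->ingress_block;
        case MIN_EGRESS:
                return &q->egress_block;
        default:
                return NULL;            /* no such class: no block */
        }
}

int main(void)
{
        struct clsact_sched_data q = {
                .ingress_block = { "ingress" },
                .egress_block  = { "egress" },
        };

        printf("%s\n", clsact_block(&q, MIN_INGRESS)->name);
        printf("%s\n", clsact_block(&q, MIN_EGRESS)->name);
        printf("%p\n", (void *)clsact_block(&q, 1));
        return 0;
}
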
unsigned long cl) { struct multiq_sched_data *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static const struct Qdisc_class_ops multiq_class_ops = { @@ -383,7 +387,7 @@ static const struct Qdisc_class_ops multiq_class_ops = { .get = multiq_get, .put = multiq_put, .walk = multiq_walk, - .tcf_chain = multiq_find_tcf, + .tcf_block = multiq_tcf_block, .bind_tcf = multiq_bind, .unbind_tcf = multiq_put, .dump = multiq_dump_class, diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 7997363f7e0d..a2404688dd01 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -25,6 +25,7 @@ struct prio_sched_data { int bands; struct tcf_proto __rcu *filter_list; + struct tcf_block *block; u8 prio2band[TC_PRIO_MAX+1]; struct Qdisc *queues[TCQ_PRIO_BANDS]; }; @@ -145,7 +146,7 @@ prio_destroy(struct Qdisc *sch) int prio; struct prio_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); for (prio = 0; prio < q->bands; prio++) qdisc_destroy(q->queues[prio]); } @@ -204,9 +205,16 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) static int prio_init(struct Qdisc *sch, struct nlattr *opt) { + struct prio_sched_data *q = qdisc_priv(sch); + int err; + if (!opt) return -EINVAL; + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; + return prio_tune(sch, opt); } @@ -317,14 +325,13 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl) { struct prio_sched_data *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static const struct Qdisc_class_ops prio_class_ops = { @@ -333,7 +340,7 @@ static const struct Qdisc_class_ops prio_class_ops = { .get = prio_get, .put = prio_put, .walk = prio_walk, - .tcf_chain = prio_find_tcf, + .tcf_block = prio_tcf_block, .bind_tcf = prio_bind, .unbind_tcf = prio_put, .dump = prio_dump_class, diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 73c7ac37b570..076ad032befb 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -182,6 +182,7 @@ struct qfq_group { struct qfq_sched { struct tcf_proto __rcu *filter_list; + struct tcf_block *block; struct Qdisc_class_hash clhash; u64 oldV, V; /* Precise virtual times. 
*/ @@ -582,15 +583,14 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg) qfq_destroy_class(sch, cl); } -static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl) { struct qfq_sched *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent, @@ -1438,6 +1438,10 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt) int i, j, err; u32 max_cl_shift, maxbudg_shift, max_classes; + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; + err = qdisc_class_hash_init(&q->clhash); if (err < 0) return err; @@ -1492,7 +1496,7 @@ static void qfq_destroy_qdisc(struct Qdisc *sch) struct hlist_node *next; unsigned int i; - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], @@ -1508,7 +1512,7 @@ static const struct Qdisc_class_ops qfq_class_ops = { .delete = qfq_delete_class, .get = qfq_get_class, .put = qfq_put_class, - .tcf_chain = qfq_tcf_chain, + .tcf_block = qfq_tcf_block, .bind_tcf = qfq_bind_tcf, .unbind_tcf = qfq_unbind_tcf, .graft = qfq_graft_class, diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index b287880829e2..9756b1ccd345 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -56,6 +56,7 @@ struct sfb_bins { struct sfb_sched_data { struct Qdisc *qdisc; struct tcf_proto __rcu *filter_list; + struct tcf_block *block; unsigned long rehash_interval; unsigned long warmup_time; /* double buffering warmup time in jiffies */ u32 max; @@ -465,7 +466,7 @@ static void sfb_destroy(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); qdisc_destroy(q->qdisc); } @@ -549,6 +550,11 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt) static int sfb_init(struct Qdisc *sch, struct nlattr *opt) { struct sfb_sched_data *q = qdisc_priv(sch); + int err; + + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; q->qdisc = &noop_qdisc; return sfb_change(sch, opt); @@ -657,14 +663,13 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl) { struct sfb_sched_data *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent, @@ -682,7 +687,7 @@ static const struct Qdisc_class_ops sfb_class_ops = { .change = sfb_change_class, .delete = sfb_delete, .walk = sfb_walk, - .tcf_chain = sfb_find_tcf, + .tcf_block = sfb_tcf_block, .bind_tcf = sfb_bind, .unbind_tcf = sfb_put, .dump = sfb_dump_class, diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 53a641f2ccb5..66dfd15b7946 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -126,6 +126,7 @@ struct sfq_sched_data { u8 flags; unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ struct tcf_proto __rcu *filter_list; + struct tcf_block *block; sfq_index *ht; /* Hash table ('divisor' slots) */ struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ @@ -697,7 +698,7 @@ static void sfq_destroy(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); - 
tcf_destroy_chain(&q->filter_list); + tcf_block_put(q->block); q->perturb_period = 0; del_timer_sync(&q->perturb_timer); sfq_free(q->ht); @@ -709,6 +710,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) { struct sfq_sched_data *q = qdisc_priv(sch); int i; + int err; + + err = tcf_block_get(&q->block, &q->filter_list); + if (err) + return err; setup_deferrable_timer(&q->perturb_timer, sfq_perturbation, (unsigned long)sch); @@ -815,14 +821,13 @@ static void sfq_put(struct Qdisc *q, unsigned long cl) { } -static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch, - unsigned long cl) +static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl) { struct sfq_sched_data *q = qdisc_priv(sch); if (cl) return NULL; - return &q->filter_list; + return q->block; } static int sfq_dump_class(struct Qdisc *sch, unsigned long cl, @@ -878,7 +883,7 @@ static const struct Qdisc_class_ops sfq_class_ops = { .leaf = sfq_leaf, .get = sfq_get, .put = sfq_put, - .tcf_chain = sfq_find_tcf, + .tcf_block = sfq_tcf_block, .bind_tcf = sfq_bind, .unbind_tcf = sfq_put, .dump = sfq_dump_class, -- cgit v1.2.3 From 2190d1d0944f84c55cdfdb89c7920f8f9311bdde Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 May 2017 11:07:59 +0200 Subject: net: sched: introduce helpers to work with filter chains Introduce struct tcf_chain object and set of helpers around it. Wraps up insertion, deletion and search in the filter chain. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/net/sch_generic.h | 7 ++- net/sched/cls_api.c | 148 +++++++++++++++++++++++++++++++++------------- 2 files changed, 113 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 98cf2f23602d..52bceede534b 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -248,10 +248,15 @@ struct qdisc_skb_cb { unsigned char data[QDISC_CB_PRIV_LEN]; }; -struct tcf_block { +struct tcf_chain { + struct tcf_proto __rcu *filter_chain; struct tcf_proto __rcu **p_filter_chain; }; +struct tcf_block { + struct tcf_chain *chain; +}; + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 690457c988b2..fee3d7faeb79 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -106,13 +106,12 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, - struct tcf_proto __rcu **chain, int event) + struct tcf_chain *chain, int event) { - struct tcf_proto __rcu **it_chain; struct tcf_proto *tp; - for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL; - it_chain = &tp->next) + for (tp = rtnl_dereference(chain->filter_chain); + tp; tp = rtnl_dereference(tp->next)) tfilter_notify(net, oskb, n, tp, 0, event, false); } @@ -187,26 +186,49 @@ static void tcf_proto_destroy(struct tcf_proto *tp) kfree_rcu(tp, rcu); } -static void tcf_chain_destroy(struct tcf_proto __rcu **fl) +static struct tcf_chain *tcf_chain_create(void) +{ + return kzalloc(sizeof(struct tcf_chain), GFP_KERNEL); +} + +static void tcf_chain_destroy(struct tcf_chain *chain) { struct tcf_proto *tp; - while ((tp = rtnl_dereference(*fl)) != NULL) { - RCU_INIT_POINTER(*fl, tp->next); + while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) { + RCU_INIT_POINTER(chain->filter_chain, tp->next); tcf_proto_destroy(tp); } + kfree(chain); 
+} + +static void +tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain, + struct tcf_proto __rcu **p_filter_chain) +{ + chain->p_filter_chain = p_filter_chain; } int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain) { struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL); + int err; if (!block) return -ENOMEM; - block->p_filter_chain = p_filter_chain; + block->chain = tcf_chain_create(); + if (!block->chain) { + err = -ENOMEM; + goto err_chain_create; + } + tcf_chain_filter_chain_ptr_set(block->chain, p_filter_chain); *p_block = block; return 0; + +err_chain_create: + kfree(block); + return err; } EXPORT_SYMBOL(tcf_block_get); @@ -214,7 +236,7 @@ void tcf_block_put(struct tcf_block *block) { if (!block) return; - tcf_chain_destroy(block->p_filter_chain); + tcf_chain_destroy(block->chain); kfree(block); } EXPORT_SYMBOL(tcf_block_put); @@ -267,6 +289,65 @@ reset: } EXPORT_SYMBOL(tcf_classify); +struct tcf_chain_info { + struct tcf_proto __rcu **pprev; + struct tcf_proto __rcu *next; +}; + +static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info) +{ + return rtnl_dereference(*chain_info->pprev); +} + +static void tcf_chain_tp_insert(struct tcf_chain *chain, + struct tcf_chain_info *chain_info, + struct tcf_proto *tp) +{ + if (chain->p_filter_chain && + *chain_info->pprev == chain->filter_chain) + *chain->p_filter_chain = tp; + RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info)); + rcu_assign_pointer(*chain_info->pprev, tp); +} + +static void tcf_chain_tp_remove(struct tcf_chain *chain, + struct tcf_chain_info *chain_info, + struct tcf_proto *tp) +{ + struct tcf_proto *next = rtnl_dereference(chain_info->next); + + if (chain->p_filter_chain && tp == chain->filter_chain) + *chain->p_filter_chain = next; + RCU_INIT_POINTER(*chain_info->pprev, next); +} + +static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, + struct tcf_chain_info *chain_info, + u32 protocol, u32 prio, + bool prio_allocate) +{ + struct tcf_proto **pprev; + struct tcf_proto *tp; + + /* Check the chain for existence of proto-tcf with this priority */ + for (pprev = &chain->filter_chain; + (tp = rtnl_dereference(*pprev)); pprev = &tp->next) { + if (tp->prio >= prio) { + if (tp->prio == prio) { + if (prio_allocate || + (tp->protocol != protocol && protocol)) + return ERR_PTR(-EINVAL); + } else { + tp = NULL; + } + break; + } + } + chain_info->pprev = pprev; + chain_info->next = tp ? 
tp->next : NULL; + return tp; +} + /* Add/change/delete/get a filter node */ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, @@ -281,10 +362,9 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, u32 parent; struct net_device *dev; struct Qdisc *q; - struct tcf_proto __rcu **back; - struct tcf_proto __rcu **chain; + struct tcf_chain_info chain_info; + struct tcf_chain *chain; struct tcf_block *block; - struct tcf_proto *next; struct tcf_proto *tp; const struct Qdisc_class_ops *cops; unsigned long cl; @@ -369,7 +449,7 @@ replay: err = -EINVAL; goto errout; } - chain = block->p_filter_chain; + chain = block->chain; if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) { tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER); @@ -378,22 +458,11 @@ replay: goto errout; } - /* Check the chain for existence of proto-tcf with this priority */ - for (back = chain; - (tp = rtnl_dereference(*back)) != NULL; - back = &tp->next) { - if (tp->prio >= prio) { - if (tp->prio == prio) { - if (prio_allocate || - (tp->protocol != protocol && protocol)) { - err = -EINVAL; - goto errout; - } - } else { - tp = NULL; - } - break; - } + tp = tcf_chain_tp_find(chain, &chain_info, protocol, + prio, prio_allocate); + if (IS_ERR(tp)) { + err = PTR_ERR(tp); + goto errout; } if (tp == NULL) { @@ -411,7 +480,7 @@ replay: } if (prio_allocate) - prio = tcf_auto_prio(rtnl_dereference(*back)); + prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info)); tp = tcf_proto_create(nla_data(tca[TCA_KIND]), protocol, prio, parent, q, block); @@ -429,8 +498,7 @@ replay: if (fh == 0) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { - next = rtnl_dereference(tp->next); - RCU_INIT_POINTER(*back, next); + tcf_chain_tp_remove(chain, &chain_info, tp); tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER, false); tcf_proto_destroy(tp); @@ -459,11 +527,10 @@ replay: err = tp->ops->delete(tp, fh, &last); if (err) goto errout; - next = rtnl_dereference(tp->next); tfilter_notify(net, skb, n, tp, t->tcm_handle, RTM_DELTFILTER, false); if (last) { - RCU_INIT_POINTER(*back, next); + tcf_chain_tp_remove(chain, &chain_info, tp); tcf_proto_destroy(tp); } goto errout; @@ -480,10 +547,8 @@ replay: err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, n->nlmsg_flags & NLM_F_CREATE ? 
TCA_ACT_NOREPLACE : TCA_ACT_REPLACE); if (err == 0) { - if (tp_created) { - RCU_INIT_POINTER(tp->next, rtnl_dereference(*back)); - rcu_assign_pointer(*back, tp); - } + if (tp_created) + tcf_chain_tp_insert(chain, &chain_info, tp); tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false); } else { if (tp_created) @@ -584,7 +649,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) struct net_device *dev; struct Qdisc *q; struct tcf_block *block; - struct tcf_proto *tp, __rcu **chain; + struct tcf_proto *tp; + struct tcf_chain *chain; struct tcmsg *tcm = nlmsg_data(cb->nlh); unsigned long cl = 0; const struct Qdisc_class_ops *cops; @@ -615,11 +681,11 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) block = cops->tcf_block(q, cl); if (!block) goto errout; - chain = block->p_filter_chain; + chain = block->chain; s_t = cb->args[0]; - for (tp = rtnl_dereference(*chain), t = 0; + for (tp = rtnl_dereference(chain->filter_chain), t = 0; tp; tp = rtnl_dereference(tp->next), t++) { if (t < s_t) continue; -- cgit v1.2.3 From 5bc1701881e395cec51811d07ec6961f3d1b2612 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 May 2017 11:08:01 +0200 Subject: net: sched: introduce multichain support for filters Instead of having only one filter chain per block, introduce a list of chains for every block. Create chain 0 by default. The UAPI is extended so the user can specify which chain to change. If the new attribute is not specified, chain 0 is used, which preserves backward compatibility. If the chain the user wants to manipulate does not exist, a new chain is created with the specified index. Also, when the last filter is removed from a chain, the chain is destroyed. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S.
Miller --- include/net/pkt_cls.h | 2 + include/net/sch_generic.h | 9 +++- include/uapi/linux/rtnetlink.h | 1 + net/sched/cls_api.c | 104 ++++++++++++++++++++++++++++++++++------- 4 files changed, 98 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index e56e7157c280..2c213a69c196 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -18,6 +18,8 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #ifdef CONFIG_NET_CLS +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index); +void tcf_chain_put(struct tcf_chain *chain); int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain); void tcf_block_put(struct tcf_block *block); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 52bceede534b..569b5654c30c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -236,7 +237,7 @@ struct tcf_proto { struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; - struct tcf_block *block; + struct tcf_chain *chain; struct rcu_head rcu; }; @@ -251,10 +252,14 @@ struct qdisc_skb_cb { struct tcf_chain { struct tcf_proto __rcu *filter_chain; struct tcf_proto __rcu **p_filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; /* chain index */ + unsigned int refcnt; }; struct tcf_block { - struct tcf_chain *chain; + struct list_head chain_list; }; static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index cce061382e40..6487b21b2c1e 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -549,6 +549,7 @@ enum { TCA_STAB, TCA_PAD, TCA_DUMP_INVISIBLE, + TCA_CHAIN, __TCA_MAX }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 63aa2ea5f00c..adacaf299c4a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -129,7 +129,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, u32 prio, u32 parent, struct Qdisc *q, - struct tcf_block *block) + struct tcf_chain *chain) { struct tcf_proto *tp; int err; @@ -165,7 +165,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, tp->prio = prio; tp->classid = parent; tp->q = q; - tp->block = block; + tp->chain = chain; err = tp->ops->init(tp); if (err) { @@ -186,15 +186,26 @@ static void tcf_proto_destroy(struct tcf_proto *tp) kfree_rcu(tp, rcu); } -static struct tcf_chain *tcf_chain_create(void) +static struct tcf_chain *tcf_chain_create(struct tcf_block *block, + u32 chain_index) { - return kzalloc(sizeof(struct tcf_chain), GFP_KERNEL); + struct tcf_chain *chain; + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) + return NULL; + list_add_tail(&chain->list, &block->chain_list); + chain->block = block; + chain->index = chain_index; + chain->refcnt = 1; + return chain; } static void tcf_chain_destroy(struct tcf_chain *chain) { struct tcf_proto *tp; + list_del(&chain->list); while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) { RCU_INIT_POINTER(chain->filter_chain, tp->next); tcf_proto_destroy(tp); @@ -202,6 +213,30 @@ static void tcf_chain_destroy(struct tcf_chain *chain) kfree(chain); } +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index) +{ + struct tcf_chain *chain; + + 
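tcf_chain_get(), completed just below, is a get-or-create lookup keyed by chain index, with a plain reference count that is safe only because every caller holds RTNL. A compilable user-space sketch of the same pattern, assuming a simplified chain that carries only an index and a refcount (names are illustrative, not kernel API):

    #include <stdlib.h>

    struct chain {
        struct chain *next;
        unsigned int index;
        unsigned int refcnt;
    };

    static struct chain *chain_get(struct chain **head, unsigned int index)
    {
        struct chain *c;

        for (c = *head; c; c = c->next)
            if (c->index == index) {
                c->refcnt++;            /* existing chain: take a ref */
                return c;
            }
        c = calloc(1, sizeof(*c));      /* not found: create with ref 1 */
        if (!c)
            return NULL;
        c->index = index;
        c->refcnt = 1;
        c->next = *head;
        *head = c;
        return c;
    }

    static void chain_put(struct chain **head, struct chain *c)
    {
        struct chain **pp;

        if (--c->refcnt)                /* the kernel also keeps chain 0 and */
            return;                     /* chains that still hold filters */
        for (pp = head; *pp; pp = &(*pp)->next)
            if (*pp == c) {
                *pp = c->next;          /* unlink, then free */
                free(c);
                return;
            }
    }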
list_for_each_entry(chain, &block->chain_list, list) { + if (chain->index == chain_index) { + chain->refcnt++; + return chain; + } + } + return tcf_chain_create(block, chain_index); +} +EXPORT_SYMBOL(tcf_chain_get); + +void tcf_chain_put(struct tcf_chain *chain) +{ + /* Destroy unused chain, with exception of chain 0, which is the + * default one and has to be always present. + */ + if (--chain->refcnt == 0 && !chain->filter_chain && chain->index != 0) + tcf_chain_destroy(chain); +} +EXPORT_SYMBOL(tcf_chain_put); + static void tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain, struct tcf_proto __rcu **p_filter_chain) @@ -213,16 +248,19 @@ int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain) { struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL); + struct tcf_chain *chain; int err; if (!block) return -ENOMEM; - block->chain = tcf_chain_create(); - if (!block->chain) { + INIT_LIST_HEAD(&block->chain_list); + /* Create chain 0 by default, it has to be always present. */ + chain = tcf_chain_create(block, 0); + if (!chain) { err = -ENOMEM; goto err_chain_create; } - tcf_chain_filter_chain_ptr_set(block->chain, p_filter_chain); + tcf_chain_filter_chain_ptr_set(chain, p_filter_chain); *p_block = block; return 0; @@ -234,9 +272,13 @@ EXPORT_SYMBOL(tcf_block_get); void tcf_block_put(struct tcf_block *block) { + struct tcf_chain *chain, *tmp; + if (!block) return; - tcf_chain_destroy(block->chain); + + list_for_each_entry_safe(chain, tmp, &block->chain_list, list) + tcf_chain_destroy(chain); kfree(block); } EXPORT_SYMBOL(tcf_block_put); @@ -360,10 +402,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, u32 prio; bool prio_allocate; u32 parent; + u32 chain_index; struct net_device *dev; struct Qdisc *q; struct tcf_chain_info chain_info; - struct tcf_chain *chain; + struct tcf_chain *chain = NULL; struct tcf_block *block; struct tcf_proto *tp; const struct Qdisc_class_ops *cops; @@ -449,7 +492,17 @@ replay: err = -EINVAL; goto errout; } - chain = block->chain; + + chain_index = tca[TCA_CHAIN] ? 
nla_get_u32(tca[TCA_CHAIN]) : 0; + if (chain_index > TC_ACT_EXT_VAL_MASK) { + err = -EINVAL; + goto errout; + } + chain = tcf_chain_get(block, chain_index); + if (!chain) { + err = -ENOMEM; + goto errout; + } if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) { tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER); @@ -483,7 +536,7 @@ replay: prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info)); tp = tcf_proto_create(nla_data(tca[TCA_KIND]), - protocol, prio, parent, q, block); + protocol, prio, parent, q, chain); if (IS_ERR(tp)) { err = PTR_ERR(tp); goto errout; @@ -556,6 +609,8 @@ replay: } errout: + if (chain) + tcf_chain_put(chain); if (cl) cops->put(q, cl); if (err == -EAGAIN) @@ -584,6 +639,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) + goto nla_put_failure; tcm->tcm_handle = fh; if (RTM_DELTFILTER != event) { tcm->tcm_handle = 0; @@ -640,7 +697,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, RTM_NEWTFILTER); } -static void tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb, +static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb, struct netlink_callback *cb, long index_start, long *p_index) { @@ -667,7 +724,7 @@ static void tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER) <= 0) - break; + return false; cb->args[1] = 1; } @@ -682,14 +739,16 @@ static void tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb, tp->ops->walk(tp, &arg.w); cb->args[1] = arg.w.count + 1; if (arg.w.stop) - break; + return false; } + return true; } /* called with RTNL */ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); + struct nlattr *tca[TCA_MAX + 1]; struct net_device *dev; struct Qdisc *q; struct tcf_block *block; @@ -699,9 +758,15 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) const struct Qdisc_class_ops *cops; long index_start; long index; + int err; if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return skb->len; + + err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL); + if (err) + return err; + dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return skb->len; @@ -725,11 +790,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) block = cops->tcf_block(q, cl); if (!block) goto errout; - chain = block->chain; index_start = cb->args[0]; index = 0; - tcf_chain_dump(chain, skb, cb, index_start, &index); + + list_for_each_entry(chain, &block->chain_list, list) { + if (tca[TCA_CHAIN] && + nla_get_u32(tca[TCA_CHAIN]) != chain->index) + continue; + if (!tcf_chain_dump(chain, skb, cb, index_start, &index)) + break; + } + cb->args[0] = index; errout: -- cgit v1.2.3 From 9fb9f251d229f6cabd9dbe4214eb7f1e6a4e8a9d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 May 2017 11:08:02 +0200 Subject: net: sched: push tp down to action init Tp pointer will be needed by the next patch in order to get the chain. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- include/net/act_api.h | 12 ++++++------ net/sched/act_api.c | 15 ++++++++------- net/sched/cls_api.c | 9 +++++---- 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/act_api.h b/include/net/act_api.h index cfa2ae33da9a..b22c6f3d6710 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -180,12 +180,12 @@ int tcf_unregister_action(struct tc_action_ops *a, int tcf_action_destroy(struct list_head *actions, int bind); int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res); -int tcf_action_init(struct net *net, struct nlattr *nla, - struct nlattr *est, char *n, int ovr, - int bind, struct list_head *); -struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, - struct nlattr *est, char *n, int ovr, - int bind); +int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, + struct nlattr *est, char *name, int ovr, int bind, + struct list_head *actions); +struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, + struct nlattr *nla, struct nlattr *est, + char *name, int ovr, int bind); int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index a90e8f355c00..e389eb45b484 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -570,9 +570,9 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) return c; } -struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, - struct nlattr *est, char *name, int ovr, - int bind) +struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, + struct nlattr *nla, struct nlattr *est, + char *name, int ovr, int bind) { struct tc_action *a; struct tc_action_ops *a_o; @@ -680,8 +680,9 @@ static void cleanup_a(struct list_head *actions, int ovr) a->tcfa_refcnt--; } -int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est, - char *name, int ovr, int bind, struct list_head *actions) +int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, + struct nlattr *est, char *name, int ovr, int bind, + struct list_head *actions) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; @@ -693,7 +694,7 @@ int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est, return err; for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { - act = tcf_action_init_1(net, tb[i], est, name, ovr, bind); + act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind); if (IS_ERR(act)) { err = PTR_ERR(act); goto err; @@ -1020,7 +1021,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, int ret = 0; LIST_HEAD(actions); - ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); + ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions); if (ret) return ret; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index adacaf299c4a..9e0c4bb82528 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -832,8 +832,9 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct tc_action *act; if (exts->police && tb[exts->police]) { - act = tcf_action_init_1(net, tb[exts->police], rate_tlv, - "police", ovr, TCA_ACT_BIND); + act = tcf_action_init_1(net, tp, tb[exts->police], + rate_tlv, "police", ovr, + TCA_ACT_BIND); if 
(IS_ERR(act)) return PTR_ERR(act); @@ -844,8 +845,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, LIST_HEAD(actions); int err, i = 0; - err = tcf_action_init(net, tb[exts->action], rate_tlv, - NULL, ovr, TCA_ACT_BIND, + err = tcf_action_init(net, tp, tb[exts->action], + rate_tlv, NULL, ovr, TCA_ACT_BIND, &actions); if (err) return err; -- cgit v1.2.3 From db50514f9a9c7ef1f17e9921b1cc0902746872f3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 May 2017 11:08:03 +0200 Subject: net: sched: add termination action to allow goto chain Introduce a new type of termination action called "goto_chain". This allows the user to specify a chain to be processed. This action type is then handled as a return value in the tcf_classify loop, in a similar way to "reclassify", except that it does not reset to the first filter of the current chain but rather resets to the first filter of the desired chain. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/net/act_api.h | 1 + include/net/sch_generic.h | 9 +++++++-- include/uapi/linux/pkt_cls.h | 1 + net/sched/act_api.c | 40 ++++++++++++++++++++++++++++++++++++++++ net/sched/cls_api.c | 6 +++++- 5 files changed, 54 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/act_api.h b/include/net/act_api.h index b22c6f3d6710..26ffd8333f50 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -42,6 +42,7 @@ struct tc_action { struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; struct tc_cookie *act_cookie; + struct tcf_chain *goto_chain; }; #define tcf_head common.tcfa_head #define tcf_index common.tcfa_index diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 569b5654c30c..368850194c94 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -193,8 +193,13 @@ struct Qdisc_ops { struct tcf_result { - unsigned long class; - u32 classid; + union { + struct { + unsigned long class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + }; }; struct tcf_proto_ops { diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index d613be3b3239..1b9aa9e6b4fd 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -51,6 +51,7 @@ enum { (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode) #define TC_ACT_JUMP __TC_ACT_EXT(1) +#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2) /* Action type identifiers*/ enum { diff --git a/net/sched/act_api.c b/net/sched/act_api.c index e389eb45b484..0ecf2a858767 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -28,6 +28,31 @@ #include #include +static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp) +{ + u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK; + + if (!tp) + return -EINVAL; + a->goto_chain = tcf_chain_get(tp->chain->block, chain_index); + if (!a->goto_chain) + return -ENOMEM; + return 0; +} + +static void tcf_action_goto_chain_fini(struct tc_action *a) +{ + tcf_chain_put(a->goto_chain); +} + +static void tcf_action_goto_chain_exec(const struct tc_action *a, + struct tcf_result *res) +{ + const struct tcf_chain *chain = a->goto_chain; + + res->goto_tp = rcu_dereference_bh(chain->filter_chain); +} + static void free_tcf(struct rcu_head *head) { struct tc_action *p = container_of(head, struct tc_action, tcfa_rcu); @@ -39,6 +64,8 @@ static void free_tcf(struct rcu_head *head) kfree(p->act_cookie->data); kfree(p->act_cookie); } + if (p->goto_chain) + tcf_action_goto_chain_fini(p);
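The control transfer that goto_chain adds shows up in the tcf_classify() hunk further below: a GOTO verdict restarts the match loop at the head of the target chain, just as "reclassify" restarts the current chain. A schematic, compilable reduction (types and names are made up for illustration; the kernel additionally bounds the number of restarts):

    #include <stddef.h>

    enum verdict { V_CONTINUE, V_OK, V_RECLASSIFY, V_GOTO_CHAIN };

    struct filter {
        struct filter *next;
        enum verdict (*classify)(void *pkt, struct filter *f,
                                 struct filter **goto_tp);
    };

    static enum verdict classify(void *pkt, struct filter *head)
    {
        struct filter *f = head;

        while (f) {
            struct filter *goto_tp = NULL;
            enum verdict v = f->classify(pkt, f, &goto_tp);

            if (v == V_RECLASSIFY) {  /* restart the current chain */
                f = head;
                continue;
            }
            if (v == V_GOTO_CHAIN) {  /* restart on the target chain */
                head = goto_tp;
                f = head;
                continue;
            }
            if (v == V_OK)            /* terminal verdict */
                return v;
            f = f->next;              /* V_CONTINUE: try the next filter */
        }
        return V_CONTINUE;
    }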
kfree(p); } @@ -465,6 +492,8 @@ repeat: else /* faulty graph, stop pipeline */ return TC_ACT_OK; } + } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) { + tcf_action_goto_chain_exec(a, res); } if (ret != TC_ACT_PIPE) @@ -657,6 +686,17 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (err != ACT_P_CREATED) module_put(a_o->owner); + if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { + err = tcf_action_goto_chain_init(a, tp); + if (err) { + LIST_HEAD(actions); + + list_add_tail(&a->list, &actions); + tcf_action_destroy(&actions, bind); + return ERR_PTR(err); + } + } + return a; err_mod: diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 9e0c4bb82528..4020b8d932a1 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -307,8 +307,12 @@ reclassify: err = tp->classify(skb, tp, res); #ifdef CONFIG_NET_CLS_ACT - if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) + if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { goto reset; + } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { + old_tp = res->goto_tp; + goto reset; + } #endif if (err >= 0) return err; -- cgit v1.2.3 From ec66eda82d4b0c552bf40005d8f53b63b2b07de4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 16 May 2017 14:00:01 -0700 Subject: tcp: introduce tcp_jiffies32 We abuse tcp_time_stamp for two different cases: 1) base to generate TCP Timestamp options (RFC 7323) 2) A 32bit version of jiffies since some TCP fields are 32bit wide to save memory. Since we want to have a 1ms TCP TS clock in the future, regardless of the HZ value, we want to clean things up. tcp_jiffies32 is the truncated jiffies value, which will be used only in places where we want a 'host' timestamp. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- include/net/tcp.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index b4dc93dae98c..4b45be570821 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -700,11 +700,14 @@ u32 __tcp_select_window(struct sock *sk); void tcp_send_window_probe(struct sock *sk); -/* TCP timestamps are only 32-bits, this causes a slight - * complication on 64-bit systems since we store a snapshot - * of jiffies in the buffer control blocks below. We decided - * to use only the low 32-bits of jiffies and hide the ugly - * casts with the following macro. +/* TCP uses 32bit jiffies to save some space. + * Note that this is different from tcp_time_stamp, which + * historically has been the same until linux-4.13. + */ +#define tcp_jiffies32 ((u32)jiffies) + +/* Generator for TCP TS option (RFC 7323) + * Currently tied to 'jiffies' but will soon be driven by 1 ms clock. */ #define tcp_time_stamp ((__u32)(jiffies)) -- cgit v1.2.3 From d635fbe27ebee0f4b845abe5e9620c9400785a5c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 16 May 2017 14:00:03 -0700 Subject: tcp: use tcp_jiffies32 to feed tp->lsndtime Use tcp_jiffies32 instead of tcp_time_stamp to feed tp->lsndtime. tcp_time_stamp will soon be a little bit more expensive than simply reading 'jiffies'. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S.
Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_cubic.c | 2 +- net/ipv4/tcp_input.c | 4 ++-- net/ipv4/tcp_output.c | 4 ++-- net/ipv4/tcp_timer.c | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 4b45be570821..feba4c0406e5 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1245,7 +1245,7 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) if (!sysctl_tcp_slow_start_after_idle || tp->packets_out || ca_ops->cong_control) return; - delta = tcp_time_stamp - tp->lsndtime; + delta = tcp_jiffies32 - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) tcp_cwnd_restart(sk, delta); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1e4c76d2b827..d0bb61ee28bb 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2841,7 +2841,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_retrans = tp->retrans_out; info->tcpi_fackets = tp->fackets_out; - now = tcp_time_stamp; + now = tcp_jiffies32; info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 0683ba447d77..2052ca740916 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -155,7 +155,7 @@ static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) { if (event == CA_EVENT_TX_START) { struct bictcp *ca = inet_csk_ca(sk); - u32 now = tcp_time_stamp; + u32 now = tcp_jiffies32; s32 delta; delta = now - tcp_sk(sk)->lsndtime; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 06e2dbc2b4a2..c0b3f909df39 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5571,7 +5571,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ - tp->lsndtime = tcp_time_stamp; + tp->lsndtime = tcp_jiffies32; tcp_init_buffer_space(sk); @@ -6008,7 +6008,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_update_pacing_rate(sk); /* Prevent spurious tcp_cwnd_restart() on first data packet */ - tp->lsndtime = tcp_time_stamp; + tp->lsndtime = tcp_jiffies32; tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4c8a6eaba6b3..be9f8f483e21 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -160,7 +160,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp, struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - const u32 now = tcp_time_stamp; + const u32 now = tcp_jiffies32; if (tcp_packets_in_flight(tp) == 0) tcp_ca_event(sk, CA_EVENT_TX_START); @@ -1918,7 +1918,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, /* Avoid bursty behavior by allowing defer * only if the last write was recent. */ - if ((s32)(tcp_time_stamp - tp->lsndtime) > 0) + if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0) goto send_now; in_flight = tcp_packets_in_flight(tp); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index ec7c5473c788..5f6f219a431e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -63,7 +63,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) /* If peer does not open window for long time, or did not transmit * anything for long time, penalize it. 
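The hunks above and the one that resumes below lean on the standard wraparound-safe idiom for 32-bit tick counters: subtract in unsigned arithmetic, then interpret the difference as signed, so orderings stay correct across a u32 wrap. A self-contained illustration (the example values are mine, not from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* same shape as (s32)(tcp_jiffies32 - tp->lsndtime) > 0 */
    static int after32(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        assert(after32(5, 1));            /* plain case */
        assert(after32(3, 0xfffffffeu));  /* b wrapped: 3 is 5 ticks later */
        assert(!after32(0xfffffffeu, 3));
        return 0;
    }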
*/ - if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) + if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) shift++; /* If some dubious ICMP arrived, penalize even more. */ @@ -73,7 +73,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) if (tcp_check_oom(sk, shift)) { /* Catch exceptional cases, when connection requires reset. * 1. Last segment was sent recently. */ - if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || + if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN || /* 2. Window is closed. */ (!tp->snd_wnd && !tp->packets_out)) do_reset = true; -- cgit v1.2.3 From 70eabf0e1b8fe11519f793416655266605f700b9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 16 May 2017 14:00:07 -0700 Subject: tcp: use tcp_jiffies32 for rcv_tstamp and lrcvtime Use tcp_jiffies32 instead of tcp_time_stamp, since tcp_time_stamp will soon be only used for TCP TS option. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- include/net/tcp.h | 4 ++-- net/ipv4/tcp_input.c | 6 +++--- net/ipv4/tcp_minisocks.c | 2 +- net/ipv4/tcp_output.c | 2 +- net/ipv4/tcp_timer.c | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index feba4c0406e5..5b2932b8363f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1307,8 +1307,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) { const struct inet_connection_sock *icsk = &tp->inet_conn; - return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime, - tcp_time_stamp - tp->rcv_tstamp); + return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime, + tcp_jiffies32 - tp->rcv_tstamp); } static inline int tcp_fin_time(const struct sock *sk) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6a15c9b80b09..eeb4967df25a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -672,7 +672,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) tcp_rcv_rtt_measure(tp); - now = tcp_time_stamp; + now = tcp_jiffies32; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize @@ -3636,7 +3636,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) */ sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; - tp->rcv_tstamp = tcp_time_stamp; + tp->rcv_tstamp = tcp_jiffies32; if (!prior_packets) goto no_queue; @@ -5554,7 +5554,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_state(sk, TCP_ESTABLISHED); - icsk->icsk_ack.lrcvtime = tcp_time_stamp; + icsk->icsk_ack.lrcvtime = tcp_jiffies32; if (skb) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 717be4de5324..59c32e0086c0 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -447,7 +447,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U); newicsk->icsk_rto = TCP_TIMEOUT_INIT; - newicsk->icsk_ack.lrcvtime = tcp_time_stamp; + newicsk->icsk_ack.lrcvtime = tcp_jiffies32; newtp->packets_out = 0; newtp->retrans_out = 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4bd50f0b236b..cbda5de16449 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3324,7 +3324,7 @@ static void tcp_connect_init(struct sock *sk) if (likely(!tp->repair)) tp->rcv_nxt = 0; else 
- tp->rcv_tstamp = tcp_time_stamp; + tp->rcv_tstamp = tcp_jiffies32; tp->rcv_wup = tp->rcv_nxt; tp->copied_seq = tp->rcv_nxt; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 5f6f219a431e..9e0616cb8c17 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -451,7 +451,7 @@ void tcp_retransmit_timer(struct sock *sk) tp->snd_una, tp->snd_nxt); } #endif - if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { + if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) { tcp_write_err(sk); goto out; } -- cgit v1.2.3 From 9a568de4818dea9a05af141046bd3e589245ab83 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 16 May 2017 14:00:14 -0700 Subject: tcp: switch TCP TS option (RFC 7323) to 1ms clock The TCP Timestamps option is defined in RFC 7323. Traditionally on Linux, it has been tied to the internal 'jiffies' variable, because it had been a cheap and good enough generator. For TCP flows on the Internet, 1 ms resolution would be much better than 4ms or 10ms (HZ=250 or HZ=100, respectively). For TCP flows in the DC, Google has used usec resolution for more than two years with great success [1]. Receive size autotuning (DRS) is indeed more precise and converges faster to optimal window size. This patch converts tp->tcp_mstamp to a plain u64 value storing a 1 usec TCP clock. This choice will allow us to upstream the 1 usec TS option as discussed in IETF 97. [1] https://www.ietf.org/proceedings/97/slides/slides-97-tcpm-tcp-options-for-low-latency-00.pdf Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- include/linux/skbuff.h | 62 +------------------------- include/linux/tcp.h | 22 ++++----- include/net/tcp.h | 59 ++++++++++++++++++++---- net/ipv4/syncookies.c | 8 ++-- net/ipv4/tcp.c | 4 +- net/ipv4/tcp_bbr.c | 22 ++++----- net/ipv4/tcp_input.c | 96 ++++++++++++++++++++-------------------- net/ipv4/tcp_ipv4.c | 17 +++---- net/ipv4/tcp_lp.c | 12 ++--- net/ipv4/tcp_minisocks.c | 4 +- net/ipv4/tcp_output.c | 16 +++---- net/ipv4/tcp_rate.c | 16 +++---- net/ipv4/tcp_recovery.c | 23 +++++----- net/ipv4/tcp_timer.c | 8 ++-- net/ipv6/syncookies.c | 2 +- net/ipv6/tcp_ipv6.c | 4 +- net/netfilter/nf_synproxy_core.c | 2 +- 17 files changed, 178 insertions(+), 199 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bfc7892f6c33..7c0cb2ce8b01 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -506,66 +506,6 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif -/** - * struct skb_mstamp - multi resolution time stamps - * @stamp_us: timestamp in us resolution - * @stamp_jiffies: timestamp in jiffies - */ -struct skb_mstamp { - union { - u64 v64; - struct { - u32 stamp_us; - u32 stamp_jiffies; - }; - }; -}; - -/** - * skb_mstamp_get - get current timestamp - * @cl: place to store timestamps - */ -static inline void skb_mstamp_get(struct skb_mstamp *cl) -{ - u64 val = local_clock(); - - do_div(val, NSEC_PER_USEC); - cl->stamp_us = (u32)val; - cl->stamp_jiffies = (u32)jiffies; -} - -/** - * skb_mstamp_delta - compute the difference in usec between two skb_mstamp - * @t1: pointer to newest sample - * @t0: pointer to oldest sample - */ -static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, - const struct skb_mstamp *t0) -{ - s32 delta_us = t1->stamp_us - t0->stamp_us; - u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies; - - /* If delta_us is negative, this might be because interval is too big, - * or local_clock() drift is too big : fallback
using jiffies. - */ - if (delta_us <= 0 || - delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ))) - - delta_us = jiffies_to_usecs(delta_jiffies); - - return delta_us; -} - -static inline bool skb_mstamp_after(const struct skb_mstamp *t1, - const struct skb_mstamp *t0) -{ - s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; - - if (!diff) - diff = t1->stamp_us - t0->stamp_us; - return diff > 0; -} - /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -646,7 +586,7 @@ struct sk_buff { union { ktime_t tstamp; - struct skb_mstamp skb_mstamp; + u64 skb_mstamp; }; }; struct rb_node rbnode; /* used in netem & tcp stack */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 22854f028434..542ca1ae02c4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -123,7 +123,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - struct skb_mstamp snt_synack; /* first SYNACK sent time */ + u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; u32 txhash; u32 rcv_isn; @@ -211,7 +211,7 @@ struct tcp_sock { /* Information of the most recently (s)acked skb */ struct tcp_rack { - struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u64 mstamp; /* (Re)sent time of the skb */ u32 rtt_us; /* Associated RTT */ u32 end_seq; /* Ending TCP sequence of the skb */ u8 advanced; /* mstamp advanced since last lost marking */ @@ -240,7 +240,7 @@ struct tcp_sock { u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ /* RTT measurement */ - struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */ + u64 tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ @@ -280,8 +280,8 @@ struct tcp_sock { u32 delivered; /* Total data packets delivered incl. rexmits */ u32 lost; /* Total data packets lost incl. rexmits */ u32 app_limited; /* limited until "delivered" reaches this val */ - struct skb_mstamp first_tx_mstamp; /* start of window send phase */ - struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */ + u64 first_tx_mstamp; /* start of window send phase */ + u64 delivered_mstamp; /* time we reached "delivered" */ u32 rate_delivered; /* saved rate sample: packets delivered */ u32 rate_interval_us; /* saved rate sample: time elapsed */ @@ -335,16 +335,16 @@ struct tcp_sock { /* Receiver side RTT estimation */ struct { - u32 rtt_us; - u32 seq; - struct skb_mstamp time; + u32 rtt_us; + u32 seq; + u64 time; } rcv_rtt_est; /* Receiver queue space */ struct { - int space; - u32 seq; - struct skb_mstamp time; + int space; + u32 seq; + u64 time; } rcvq_space; /* TCP-specific MTU probe information. 
*/ diff --git a/include/net/tcp.h b/include/net/tcp.h index 5b2932b8363f..82462db97183 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -519,7 +519,7 @@ static inline u32 tcp_cookie_time(void) u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss); -__u32 cookie_init_timestamp(struct request_sock *req); +u64 cookie_init_timestamp(struct request_sock *req); bool cookie_timestamp_decode(struct tcp_options_received *opt); bool cookie_ecn_ok(const struct tcp_options_received *opt, const struct net *net, const struct dst_entry *dst); @@ -706,14 +706,55 @@ void tcp_send_window_probe(struct sock *sk); */ #define tcp_jiffies32 ((u32)jiffies) -/* Generator for TCP TS option (RFC 7323) - * Currently tied to 'jiffies' but will soon be driven by 1 ms clock. +/* + * Deliver a 32bit value for TCP timestamp option (RFC 7323) + * It is no longer tied to jiffies, but to 1 ms clock. + * Note: double check if you want to use tcp_jiffies32 instead of this. + */ +#define TCP_TS_HZ 1000 + +static inline u64 tcp_clock_ns(void) +{ + return local_clock(); +} + +static inline u64 tcp_clock_us(void) +{ + return div_u64(tcp_clock_ns(), NSEC_PER_USEC); +} + +/* This should only be used in contexts where tp->tcp_mstamp is up to date */ +static inline u32 tcp_time_stamp(const struct tcp_sock *tp) +{ + return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ); +} + +/* Could use tcp_clock_us() / 1000, but this version uses a single divide */ +static inline u32 tcp_time_stamp_raw(void) +{ + return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ); +} + + +/* Refresh 1us clock of a TCP socket, + * ensuring monotonically increasing values. */ -#define tcp_time_stamp ((__u32)(jiffies)) +static inline void tcp_mstamp_refresh(struct tcp_sock *tp) +{ + u64 val = tcp_clock_us(); + + if (val > tp->tcp_mstamp) + tp->tcp_mstamp = val; +} + +static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) +{ + return max_t(s64, t1 - t0, 0); +} static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) { - return skb->skb_mstamp.stamp_jiffies; + return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); } @@ -778,9 +819,9 @@ struct tcp_skb_cb { /* pkts S/ACKed so far upon tx of skb, incl retrans: */ __u32 delivered; /* start of send pipeline phase */ - struct skb_mstamp first_tx_mstamp; + u64 first_tx_mstamp; /* when we reached the "delivered" count */ - struct skb_mstamp delivered_mstamp; + u64 delivered_mstamp; } tx; /* only used for outgoing skbs */ union { struct inet_skb_parm h4; @@ -896,7 +937,7 @@ struct ack_sample { * A sample is invalid if "delivered" or "interval_us" is negative.
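A user-space reduction of the new clock helpers just introduced in tcp.h, with clock_gettime() standing in for local_clock(); this is only a sketch, the names merely mirror the kernel ones. The refresh step only ever moves the cached stamp forward, which keeps a socket's tcp_mstamp monotonic even when the underlying sched clock is not, and the delta helper clamps negative results to zero for the same reason:

    #include <stdint.h>
    #include <time.h>

    static uint64_t clock_us(void)                  /* cf. tcp_clock_us() */
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
    }

    static void mstamp_refresh(uint64_t *mstamp)    /* cf. tcp_mstamp_refresh() */
    {
        uint64_t val = clock_us();

        if (val > *mstamp)                          /* never step backwards */
            *mstamp = val;
    }

    static uint64_t stamp_us_delta(uint64_t t1, uint64_t t0)
    {
        return t1 > t0 ? t1 - t0 : 0;               /* cf. tcp_stamp_us_delta() */
    }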
*/ struct rate_sample { - struct skb_mstamp prior_mstamp; /* starting timestamp for interval */ + u64 prior_mstamp; /* starting timestamp for interval */ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ s32 delivered; /* number of packets delivered over interval */ long interval_us; /* time for tp->delivered to incr "delivered" */ @@ -1862,7 +1903,7 @@ void tcp_init(void); /* tcp_recovery.c */ extern void tcp_rack_mark_lost(struct sock *sk); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, - const struct skb_mstamp *xmit_time); + u64 xmit_time); extern void tcp_rack_reo_timeout(struct sock *sk); /* diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 0257d965f111..6426250a58ea 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -66,10 +66,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, * Since subsequent timestamps use the normal tcp_time_stamp value, we * must make sure that the resulting initial timestamp is <= tcp_time_stamp. */ -__u32 cookie_init_timestamp(struct request_sock *req) +u64 cookie_init_timestamp(struct request_sock *req) { struct inet_request_sock *ireq; - u32 ts, ts_now = tcp_time_stamp; + u32 ts, ts_now = tcp_time_stamp_raw(); u32 options = 0; ireq = inet_rsk(req); @@ -88,7 +88,7 @@ __u32 cookie_init_timestamp(struct request_sock *req) ts <<= TSBITS; ts |= options; } - return ts; + return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ); } @@ -343,7 +343,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; - treq->snt_synack.v64 = 0; + treq->snt_synack = 0; treq->tfo_listener = false; ireq->ir_iif = inet_request_bound_dev_if(sk, skb); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 850054800526..b5d18484746d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2706,7 +2706,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (!tp->repair) err = -EPERM; else - tp->tsoffset = val - tcp_time_stamp; + tp->tsoffset = val - tcp_time_stamp_raw(); break; case TCP_REPAIR_WINDOW: err = tcp_repair_set_window(tp, optval, optlen); @@ -3072,7 +3072,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, break; case TCP_TIMESTAMP: - val = tcp_time_stamp + tp->tsoffset; + val = tcp_time_stamp_raw() + tp->tsoffset; break; case TCP_NOTSENT_LOWAT: val = tp->notsent_lowat; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 40dc4fc5f6ac..dbcc9352a48f 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -91,7 +91,7 @@ struct bbr { struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */ u32 rtt_cnt; /* count of packet-timed rounds elapsed */ u32 next_rtt_delivered; /* scb->tx.delivered at end of round */ - struct skb_mstamp cycle_mstamp; /* time of this cycle phase start */ + u64 cycle_mstamp; /* time of this cycle phase start */ u32 mode:3, /* current bbr_mode in state machine */ prev_ca_state:3, /* CA state on previous ACK */ packet_conservation:1, /* use packet conservation? 
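The long-term bandwidth sampling hunk in tcp_bbr.c above converts a millisecond interval to microseconds only after checking that the multiply cannot overflow 32 bits, the same guard as the patch's if (t >= ~0U / USEC_PER_MSEC) test. A self-contained illustration of that guard (values chosen by me):

    #include <assert.h>
    #include <stdint.h>

    #define USEC_PER_MSEC 1000u

    /* convert ms to us; *ok is cleared if the product would overflow u32 */
    static uint32_t ms_to_us_checked(uint32_t t_ms, int *ok)
    {
        if (t_ms >= UINT32_MAX / USEC_PER_MSEC) {
            *ok = 0;
            return 0;
        }
        *ok = 1;
        return t_ms * USEC_PER_MSEC;
    }

    int main(void)
    {
        int ok;

        assert(ms_to_us_checked(40, &ok) == 40000 && ok);
        ms_to_us_checked(5000000u, &ok);  /* 5e6 ms exceeds u32 in usec */
        assert(!ok);
        return 0;
    }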
*/ @@ -411,7 +411,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk, struct tcp_sock *tp = tcp_sk(sk); struct bbr *bbr = inet_csk_ca(sk); bool is_full_length = - skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) > + tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) > bbr->min_rtt_us; u32 inflight, bw; @@ -497,7 +497,7 @@ static void bbr_reset_lt_bw_sampling_interval(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct bbr *bbr = inet_csk_ca(sk); - bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies; + bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC); bbr->lt_last_delivered = tp->delivered; bbr->lt_last_lost = tp->lost; bbr->lt_rtt_cnt = 0; @@ -551,7 +551,7 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs) struct bbr *bbr = inet_csk_ca(sk); u32 lost, delivered; u64 bw; - s32 t; + u32 t; if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */ if (bbr->mode == BBR_PROBE_BW && bbr->round_start && @@ -603,15 +603,15 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs) return; /* Find average delivery rate in this sampling interval. */ - t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp); - if (t < 1) - return; /* interval is less than one jiffy, so wait */ - t = jiffies_to_usecs(t); - /* Interval long enough for jiffies_to_usecs() to return a bogus 0? */ - if (t < 1) { + t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp; + if ((s32)t < 1) + return; /* interval is less than one ms, so wait */ + /* Check if can multiply without overflow */ + if (t >= ~0U / USEC_PER_MSEC) { bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */ return; } + t *= USEC_PER_MSEC; bw = (u64)delivered * BW_UNIT; do_div(bw, t); bbr_lt_bw_interval_done(sk, bw); @@ -825,7 +825,7 @@ static void bbr_init(struct sock *sk) bbr->idle_restart = 0; bbr->full_bw = 0; bbr->full_bw_cnt = 0; - bbr->cycle_mstamp.v64 = 0; + bbr->cycle_mstamp = 0; bbr->cycle_idx = 0; bbr_reset_lt_bw_sampling(sk); bbr_reset_startup_mode(sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 10e6775464f6..9a5a9e8eda89 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -441,7 +441,7 @@ void tcp_init_buffer_space(struct sock *sk) tcp_sndbuf_expand(sk); tp->rcvq_space.space = tp->rcv_wnd; - skb_mstamp_get(&tp->tcp_mstamp); + tcp_mstamp_refresh(tp); tp->rcvq_space.time = tp->tcp_mstamp; tp->rcvq_space.seq = tp->copied_seq; @@ -555,11 +555,11 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { u32 delta_us; - if (tp->rcv_rtt_est.time.v64 == 0) + if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; - delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time); + delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); tcp_rcv_rtt_update(tp, delta_us, 1); new_measure: @@ -571,13 +571,15 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); + if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) - tcp_rcv_rtt_update(tp, - jiffies_to_usecs(tcp_time_stamp - - tp->rx_opt.rcv_tsecr), - 0); + TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { + u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; + u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); + + tcp_rcv_rtt_update(tp, delta_us, 0); + } } /* @@ -590,7 +592,7 @@ void tcp_rcv_space_adjust(struct sock 
*sk) int time; int copied; - time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time); + time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) return; @@ -1134,8 +1136,8 @@ struct tcp_sacktag_state { * that was SACKed. RTO needs the earliest RTT to stay conservative, * but congestion control should still get an accurate delay signal. */ - struct skb_mstamp first_sackt; - struct skb_mstamp last_sackt; + u64 first_sackt; + u64 last_sackt; struct rate_sample *rate; int flag; }; @@ -1200,7 +1202,7 @@ static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, int dup_sack, int pcount, - const struct skb_mstamp *xmit_time) + u64 xmit_time) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; @@ -1242,9 +1244,9 @@ static u8 tcp_sacktag_one(struct sock *sk, state->reord); if (!after(end_seq, tp->high_seq)) state->flag |= FLAG_ORIG_SACK_ACKED; - if (state->first_sackt.v64 == 0) - state->first_sackt = *xmit_time; - state->last_sackt = *xmit_time; + if (state->first_sackt == 0) + state->first_sackt = xmit_time; + state->last_sackt = xmit_time; } if (sacked & TCPCB_LOST) { @@ -1304,7 +1306,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, start_seq, end_seq, dup_sack, pcount, - &skb->skb_mstamp); + skb->skb_mstamp); tcp_rate_skb_delivered(sk, skb, state->rate); if (skb == tp->lost_skb_hint) @@ -1356,8 +1358,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, tcp_advance_highest_sack(sk, skb); tcp_skb_collapse_tstamp(prev, skb); - if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64)) - TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0; + if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp)) + TCP_SKB_CB(prev)->tx.delivered_mstamp = 0; tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); @@ -1587,7 +1589,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, TCP_SKB_CB(skb)->end_seq, dup_sack, tcp_skb_pcount(skb), - &skb->skb_mstamp); + skb->skb_mstamp); tcp_rate_skb_delivered(sk, skb, state->rate); if (!before(TCP_SKB_CB(skb)->seq, @@ -2936,9 +2938,12 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, * See draft-ietf-tcplw-high-performance-00, section 3.3. 
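The tcp_ack_update_rtt() hunk that resumes below derives an RTT from the timestamp echo by taking a wrap-safe u32 difference in TS ticks and scaling it to microseconds; with TCP_TS_HZ at 1000, one tick is exactly 1000 us. A worked reduction with illustrative numbers:

    #include <assert.h>
    #include <stdint.h>

    #define TCP_TS_HZ    1000u
    #define USEC_PER_SEC 1000000u

    static uint32_t ts_echo_rtt_us(uint32_t ts_now, uint32_t rcv_tsecr)
    {
        uint32_t delta = ts_now - rcv_tsecr;        /* wrap-safe u32 math */

        return delta * (USEC_PER_SEC / TCP_TS_HZ);  /* ticks -> usec */
    }

    int main(void)
    {
        /* tsecr echoed 40 ticks (40 ms) ago -> 40000 us */
        assert(ts_echo_rtt_us(1040, 1000) == 40000);
        return 0;
    }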
*/ if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && - flag & FLAG_ACKED) - seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp - - tp->rx_opt.rcv_tsecr); + flag & FLAG_ACKED) { + u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; + u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); + + seq_rtt_us = ca_rtt_us = delta_us; + } if (seq_rtt_us < 0) return false; @@ -2960,12 +2965,8 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) { long rtt_us = -1L; - if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) { - struct skb_mstamp now; - - skb_mstamp_get(&now); - rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack); - } + if (req && !req->num_retrans && tcp_rsk(req)->snt_synack) + rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack); tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us); } @@ -3003,7 +3004,7 @@ void tcp_rearm_rto(struct sock *sk) struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = tcp_skb_timestamp(skb) + rto; - s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); + s32 delta = (s32)(rto_time_stamp - tcp_jiffies32); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ @@ -3060,9 +3061,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, struct tcp_sacktag_state *sack) { const struct inet_connection_sock *icsk = inet_csk(sk); - struct skb_mstamp first_ackt, last_ackt; + u64 first_ackt, last_ackt; struct tcp_sock *tp = tcp_sk(sk); - struct skb_mstamp *now = &tp->tcp_mstamp; u32 prior_sacked = tp->sacked_out; u32 reord = tp->packets_out; bool fully_acked = true; @@ -3075,7 +3075,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, bool rtt_update; int flag = 0; - first_ackt.v64 = 0; + first_ackt = 0; while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); @@ -3106,8 +3106,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, flag |= FLAG_RETRANS_DATA_ACKED; } else if (!(sacked & TCPCB_SACKED_ACKED)) { last_ackt = skb->skb_mstamp; - WARN_ON_ONCE(last_ackt.v64 == 0); - if (!first_ackt.v64) + WARN_ON_ONCE(last_ackt == 0); + if (!first_ackt) first_ackt = last_ackt; last_in_flight = TCP_SKB_CB(skb)->tx.in_flight; @@ -3122,7 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tp->delivered += acked_pcount; if (!tcp_skb_spurious_retrans(tp, skb)) tcp_rack_advance(tp, sacked, scb->end_seq, - &skb->skb_mstamp); + skb->skb_mstamp); } if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; @@ -3165,13 +3165,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) flag |= FLAG_SACK_RENEGING; - if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) { - seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt); - ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt); + if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) { + seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); + ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); } - if (sack->first_sackt.v64) { - sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt); - ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt); + if (sack->first_sackt) { + sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); + ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); } sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed 
packet, or -1 */ rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, @@ -3201,7 +3201,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tp->fackets_out -= min(pkts_acked, tp->fackets_out); } else if (skb && rtt_update && sack_rtt_us >= 0 && - sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) { + sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { /* Do not re-arm RTO if the sack RTT is measured from data sent * after when the head was last (re)transmitted. Otherwise the * timeout may continue to extend in loss recovery. @@ -3553,7 +3553,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int acked = 0; /* Number of packets newly acked */ int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */ - sack_state.first_sackt.v64 = 0; + sack_state.first_sackt = 0; sack_state.rate = &rs; /* We very likely will need to access write queue head. */ @@ -5356,7 +5356,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); - skb_mstamp_get(&tp->tcp_mstamp); + tcp_mstamp_refresh(tp); if (unlikely(!sk->sk_rx_dst)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* @@ -5672,7 +5672,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, - tcp_time_stamp)) { + tcp_time_stamp(tp))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; @@ -5917,7 +5917,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) case TCP_SYN_SENT: tp->rx_opt.saw_tstamp = 0; - skb_mstamp_get(&tp->tcp_mstamp); + tcp_mstamp_refresh(tp); queued = tcp_rcv_synsent_state_process(sk, skb, th); if (queued >= 0) return queued; @@ -5929,7 +5929,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) return 0; } - skb_mstamp_get(&tp->tcp_mstamp); + tcp_mstamp_refresh(tp); tp->rx_opt.saw_tstamp = 0; req = tp->fastopen_rsk; if (req) { @@ -6202,7 +6202,7 @@ static void tcp_openreq_init(struct request_sock *req, req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; - skb_mstamp_get(&tcp_rsk(req)->snt_synack); + tcp_rsk(req)->snt_synack = tcp_clock_us(); tcp_rsk(req)->last_oow_ack_time = 0; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? 
rx_opt->rcv_tsval : 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d8fe25db79f2..191b2f78b19d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -376,8 +376,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) struct sock *sk; struct sk_buff *skb; struct request_sock *fastopen; - __u32 seq, snd_una; - __u32 remaining; + u32 seq, snd_una; + s32 remaining; + u32 delta_us; int err; struct net *net = dev_net(icmp_skb->dev); @@ -483,12 +484,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) skb = tcp_write_queue_head(sk); BUG_ON(!skb); - skb_mstamp_get(&tp->tcp_mstamp); + tcp_mstamp_refresh(tp); + delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp); remaining = icsk->icsk_rto - - min(icsk->icsk_rto, - tcp_time_stamp - tcp_skb_timestamp(skb)); + usecs_to_jiffies(delta_us); - if (remaining) { + if (remaining > 0) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); } else { @@ -812,7 +813,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) tcp_v4_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, - tcp_time_stamp + tcptw->tw_ts_offset, + tcp_time_stamp_raw() + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), @@ -840,7 +841,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, tcp_v4_send_ack(sk, skb, seq, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, - tcp_time_stamp + tcp_rsk(req)->ts_off, + tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, req->ts_recent, 0, tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index ef3122abb373..ae10ed64fe13 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -37,7 +37,7 @@ #include /* resolution of owd */ -#define LP_RESOL 1000 +#define LP_RESOL TCP_TS_HZ /** * enum tcp_lp_state @@ -147,9 +147,9 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk) tp->rx_opt.rcv_tsecr == lp->local_ref_time) goto out; - m = HZ * (tp->rx_opt.rcv_tsval - - lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr - - lp->local_ref_time); + m = TCP_TS_HZ * + (tp->rx_opt.rcv_tsval - lp->remote_ref_time) / + (tp->rx_opt.rcv_tsecr - lp->local_ref_time); if (m < 0) m = -m; @@ -194,7 +194,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk) if (lp->flag & LP_VALID_RHZ) { owd = tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) - - tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ); + tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ); if (owd < 0) owd = -owd; } @@ -264,7 +264,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample) { struct tcp_sock *tp = tcp_sk(sk); struct lp *lp = inet_csk_ca(sk); - u32 now = tcp_time_stamp; + u32 now = tcp_time_stamp(tp); u32 delta; if (sample->rtt_us > 0) diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6504f1082bdf..d0642df73044 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->fackets_out = 0; newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; newtp->tlp_high_seq = 0; - newtp->lsndtime = treq->snt_synack.stamp_jiffies; + newtp->lsndtime = tcp_jiffies32; newsk->sk_txhash = treq->txhash; newtp->last_oow_ack_time = 0; newtp->total_retrans = req->num_retrans; @@ -526,7 +526,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->fastopen_req = NULL; newtp->fastopen_rsk = NULL; newtp->syn_data_acked = 0; - newtp->rack.mstamp.v64 
= 0; + newtp->rack.mstamp = 0; newtp->rack.advanced = 0; __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 65472e931a0b..478f75baee31 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1962,7 +1962,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, head = tcp_write_queue_head(sk); - age = skb_mstamp_us_delta(&tp->tcp_mstamp, &head->skb_mstamp); + age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); /* If next ACK is likely to come too late (half srtt), do not defer */ if (age < (tp->srtt_us >> 4)) goto send_now; @@ -2279,7 +2279,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, } max_segs = tcp_tso_segs(sk, mss_now); - skb_mstamp_get(&tp->tcp_mstamp); + tcp_mstamp_refresh(tp); while ((skb = tcp_send_head(sk))) { unsigned int limit; @@ -3095,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) skb_reserve(skb, MAX_TCP_HEADER); tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST); - skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp); + tcp_mstamp_refresh(tcp_sk(sk)); /* Send it off. */ if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); @@ -3191,10 +3191,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES if (unlikely(req->cookie_ts)) - skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); + skb->skb_mstamp = cookie_init_timestamp(req); else #endif - skb_mstamp_get(&skb->skb_mstamp); + skb->skb_mstamp = tcp_clock_us(); #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); @@ -3453,8 +3453,8 @@ int tcp_connect(struct sock *sk) return -ENOBUFS; tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); - skb_mstamp_get(&tp->tcp_mstamp); - tp->retrans_stamp = tp->tcp_mstamp.stamp_jiffies; + tcp_mstamp_refresh(tp); + tp->retrans_stamp = tcp_time_stamp(tp); tcp_connect_queue_skb(sk, buff); tcp_ecn_send_syn(sk, buff); @@ -3615,7 +3615,7 @@ void tcp_send_window_probe(struct sock *sk) { if (sk->sk_state == TCP_ESTABLISHED) { tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; - skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp); + tcp_mstamp_refresh(tcp_sk(sk)); tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); } } diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index c6a9fa894646..ad99569d4c1e 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -78,7 +78,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *scb = TCP_SKB_CB(skb); - if (!scb->tx.delivered_mstamp.v64) + if (!scb->tx.delivered_mstamp) return; if (!rs->prior_delivered || @@ -89,9 +89,9 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, rs->is_retrans = scb->sacked & TCPCB_RETRANS; /* Find the duration of the "send phase" of this window: */ - rs->interval_us = skb_mstamp_us_delta( - &skb->skb_mstamp, - &scb->tx.first_tx_mstamp); + rs->interval_us = tcp_stamp_us_delta( + skb->skb_mstamp, + scb->tx.first_tx_mstamp); /* Record send time of most recently ACKed packet: */ tp->first_tx_mstamp = skb->skb_mstamp; @@ -101,7 +101,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, * we don't need to reset since it'll be freed soon. */ if (scb->sacked & TCPCB_SACKED_ACKED) - scb->tx.delivered_mstamp.v64 = 0; + scb->tx.delivered_mstamp = 0; } /* Update the connection delivery information and generate a rate sample. 
*/ @@ -125,7 +125,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, rs->acked_sacked = delivered; /* freshly ACKed or SACKed */ rs->losses = lost; /* freshly marked lost */ /* Return an invalid sample if no timing information is available. */ - if (!rs->prior_mstamp.v64) { + if (!rs->prior_mstamp) { rs->delivered = -1; rs->interval_us = -1; return; @@ -138,8 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, * longer phase. */ snd_us = rs->interval_us; /* send phase */ - ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp, - &rs->prior_mstamp); /* ack phase */ + ack_us = tcp_stamp_us_delta(tp->tcp_mstamp, + rs->prior_mstamp); /* ack phase */ rs->interval_us = max(snd_us, ack_us); /* Normally we expect interval_us >= min-rtt. diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index cd72b3d3879e..fe9a493d0208 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -17,12 +17,9 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) } } -static bool tcp_rack_sent_after(const struct skb_mstamp *t1, - const struct skb_mstamp *t2, - u32 seq1, u32 seq2) +static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2) { - return skb_mstamp_after(t1, t2) || - (t1->v64 == t2->v64 && after(seq1, seq2)); + return t1 > t2 || (t1 == t2 && after(seq1, seq2)); } /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01): @@ -72,14 +69,14 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) scb->sacked & TCPCB_SACKED_ACKED) continue; - if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp, + if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp, tp->rack.end_seq, scb->end_seq)) { /* Step 3 in draft-cheng-tcpm-rack-00.txt: * A packet is lost if its elapsed time is beyond * the recent RTT plus the reordering window. */ - u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp, - &skb->skb_mstamp); + u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp, + skb->skb_mstamp); s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed; if (remaining < 0) { @@ -127,16 +124,16 @@ void tcp_rack_mark_lost(struct sock *sk) * draft-cheng-tcpm-rack-00.txt */ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, - const struct skb_mstamp *xmit_time) + u64 xmit_time) { u32 rtt_us; - if (tp->rack.mstamp.v64 && - !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp, + if (tp->rack.mstamp && + !tcp_rack_sent_after(xmit_time, tp->rack.mstamp, end_seq, tp->rack.end_seq)) return; - rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time); + rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time); if (sacked & TCPCB_RETRANS) { /* If the sacked packet was retransmitted, it's ambiguous * whether the retransmission or the original (or the prior @@ -152,7 +149,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, return; } tp->rack.rtt_us = rtt_us; - tp->rack.mstamp = *xmit_time; + tp->rack.mstamp = xmit_time; tp->rack.end_seq = end_seq; tp->rack.advanced = 1; } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 6629f47aa7f0..27a667bce806 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -153,8 +153,8 @@ static bool retransmits_timed_out(struct sock *sk, unsigned int timeout, bool syn_set) { - unsigned int linear_backoff_thresh, start_ts; unsigned int rto_base = syn_set ? 
TCP_TIMEOUT_INIT : TCP_RTO_MIN; + unsigned int linear_backoff_thresh, start_ts; if (!inet_csk(sk)->icsk_retransmits) return false; @@ -172,7 +172,7 @@ static bool retransmits_timed_out(struct sock *sk, timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; } - return (tcp_time_stamp - start_ts) >= timeout; + return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout); } /* A write timeout has occurred. Process the after effects. */ @@ -341,7 +341,7 @@ static void tcp_probe_timer(struct sock *sk) if (!start_ts) tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp; else if (icsk->icsk_user_timeout && - (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout) + (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout) goto abort; max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2; @@ -561,7 +561,7 @@ void tcp_write_timer_handler(struct sock *sk) goto out; } - skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp); + tcp_mstamp_refresh(tcp_sk(sk)); event = icsk->icsk_pending; switch (event) { diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5abc3692b901..971823359f5b 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -211,7 +211,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; - treq->snt_synack.v64 = 0; + treq->snt_synack = 0; treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; treq->ts_off = 0; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4f4310a36a04..233edfabe1db 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -949,7 +949,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, - tcp_time_stamp + tcptw->tw_ts_offset, + tcp_time_stamp_raw() + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel)); @@ -971,7 +971,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, - tcp_time_stamp + tcp_rsk(req)->ts_off, + tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index a504e87c6ddf..49bd8bb16b18 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -152,7 +152,7 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info, struct synproxy_options *opts) { opts->tsecr = opts->tsval; - opts->tsval = tcp_time_stamp & ~0x3f; + opts->tsval = tcp_time_stamp_raw() & ~0x3f; if (opts->options & XT_SYNPROXY_OPT_WSCALE) { opts->tsval |= opts->wscale; -- cgit v1.2.3 From 33c35aa4817864e056fd772230b0c6b552e36ea2 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 15 May 2017 09:34:06 -0400 Subject: cgroup: Prevent kill_css() from being called more than once The kill_css() function may be called more than once when the css has been killed but not yet physically removed, and the cgroup hosting the css is then removed. This patch prevents any harm from being done when that happens.
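The guard added below is the familiar latch-a-flag-before-teardown idiom. A minimal userspace model of the same idea follows (illustrative only: struct obj, DYING and kill_obj() are made-up stand-ins; the kernel code uses css->flags and CSS_DYING under cgroup_mutex):

#include <stdio.h>

#define DYING 0x10

struct obj {
	unsigned int flags;
};

static void kill_obj(struct obj *o)
{
	/* A second (or later) call degenerates into a no-op. */
	if (o->flags & DYING)
		return;
	o->flags |= DYING;
	printf("tearing down\n");
}

int main(void)
{
	struct obj o = { 0 };

	kill_obj(&o);	/* performs the teardown */
	kill_obj(&o);	/* harmless no-op */
	return 0;
}

Once the flag is latched, repeated calls return early, which is exactly what the kill_css() hunk below achieves.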
Signed-off-by: Waiman Long Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org # v4.5+ --- include/linux/cgroup-defs.h | 1 + kernel/cgroup/cgroup.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 21745946cae1..ec47101cb1bf 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -48,6 +48,7 @@ enum { CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ CSS_VISIBLE = (1 << 3), /* css is visible to userland */ + CSS_DYING = (1 << 4), /* css is dying */ }; /* bits in struct cgroup flags field */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c3c9a0e1b3c9..8d4e85eae42c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css) { lockdep_assert_held(&cgroup_mutex); + if (css->flags & CSS_DYING) + return; + + css->flags |= CSS_DYING; + /* * This must happen before css is disassociated with its cgroup. * See seq_css() for details. -- cgit v1.2.3 From 30e7d894c1478c88d50ce94ddcdbd7f9763d9cdd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 17 May 2017 10:19:49 +0200 Subject: tracing/kprobes: Enforce kprobes teardown after testing Enabling the tracer selftest occasionally triggers the warning in text_poke(), which fires when the page to be modified is not marked reserved. The reason is that the tracer selftest installs kprobes on functions marked __init for testing. These probes are removed after the tests, but that removal schedules the delayed kprobes_optimizer work, which will do the actual text poke. If the work is executed after the init text is freed, then the warning triggers. The bug can be reproduced reliably when the work delay is increased. Flush the optimizer work and wait for the optimizing/unoptimizing lists to become empty before returning from the kprobes tracer selftest. That ensures that all operations which were queued due to the probes removal have completed.
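The underlying discipline is generic: deferred work must be flushed before the memory it may touch is freed. A minimal userspace sketch of that ordering, using a POSIX thread in place of the kprobe optimizer workqueue (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static char *init_text;	/* stands in for freeable __init text */

static void *deferred_patch(void *arg)
{
	init_text[0] = (char)0x90;	/* the deferred "text poke" */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	init_text = malloc(16);
	if (!init_text)
		return 1;
	pthread_create(&worker, NULL, deferred_patch, NULL);

	/*
	 * Flush the deferred work *before* freeing the memory it may
	 * touch, mirroring the wait_for_kprobe_optimizer() call the
	 * selftest now makes before the __init text is released.
	 */
	pthread_join(&worker, NULL);

	free(init_text);
	return 0;
}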
Link: http://lkml.kernel.org/r/20170516094802.76a468bb@gandalf.local.home Signed-off-by: Thomas Gleixner Acked-by: Masami Hiramatsu Cc: stable@vger.kernel.org Fixes: 6274de498 ("kprobes: Support delayed unoptimizing") Signed-off-by: Steven Rostedt (VMware) --- include/linux/kprobes.h | 3 +++ kernel/kprobes.c | 2 +- kernel/trace/trace_kprobe.c | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 30f90c1a0aaf..541df0b5b815 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -349,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif +extern void wait_for_kprobe_optimizer(void); +#else +static inline void wait_for_kprobe_optimizer(void) { } #endif /* CONFIG_OPTPROBES */ #ifdef CONFIG_KPROBES_ON_FTRACE extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 7367e0ec6f81..199243bba554 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -595,7 +595,7 @@ static void kprobe_optimizer(struct work_struct *work) } /* Wait for completing optimization and unoptimization */ -static void wait_for_kprobe_optimizer(void) +void wait_for_kprobe_optimizer(void) { mutex_lock(&kprobe_mutex); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8485f6738a87..c129fca6ec99 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1535,6 +1535,11 @@ static __init int kprobe_trace_self_tests_init(void) end: release_all_trace_kprobes(); + /* + * Wait for the optimizer work to finish. Otherwise it might fiddle + * with probes in already freed __init text. + */ + wait_for_kprobe_optimizer(); if (warn) pr_cont("NG: Some tests are failed. Please check them.\n"); else -- cgit v1.2.3 From 552c9f47f8d451830a6b47151c6d2db77f77cc3e Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 17 May 2017 13:12:51 +0200 Subject: KVM: arm/arm64: Fix bug when registering redist iodevs If userspace creates the VCPUs after initializing the VGIC, then we end up in a situation where we trigger a bug in kvm_vcpu_get_idx(), because it is called prior to adding the VCPU into the vcpus array on the VM. There is no tight coupling between the VCPU index and the area of the redistributor region used for the VCPU, so we can simply ensure that all creations of redistributors are serialized per VM, and increment an offset when we successfully add a redistributor. The vgic_register_redist_iodev() function can be called from two paths: vgic_register_all_redist_iodevs(), which is called via the kvm_vgic_addr() device attribute handler. This path already holds the kvm->lock mutex. The other path is via kvm_vgic_vcpu_init(), which is called through a longer chain from kvm_vm_ioctl_create_vcpu(), which releases the kvm->lock mutex just before calling kvm_arch_vcpu_create(), so we can simply take this mutex again later for our purposes.
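The allocation scheme this describes — a per-VM cursor advanced under a lock, and only after a successful registration — can be modeled in a few lines of userspace C (the names, the base address, and the helper are illustrative stand-ins, not the KVM code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define REDIST_REGION_SZ (2 * 64 * 1024)	/* RD frame + SGI frame */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t redist_base = 0x08000000;	/* illustrative base */
static uint64_t free_offset;

/*
 * Hand out the next redistributor slot under the lock, and only
 * advance the cursor once "registration" has succeeded, as the
 * patch does.
 */
static uint64_t alloc_redist(void)
{
	uint64_t addr;

	pthread_mutex_lock(&lock);
	addr = redist_base + free_offset;
	/* ... MMIO registration would happen here; on failure,
	 * return without advancing free_offset ... */
	free_offset += REDIST_REGION_SZ;
	pthread_mutex_unlock(&lock);
	return addr;
}

int main(void)
{
	printf("vcpu0 @ %#llx\n", (unsigned long long)alloc_redist());
	printf("vcpu1 @ %#llx\n", (unsigned long long)alloc_redist());
	return 0;
}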
Fixes: ab6f468c10 ("KVM: arm/arm64: Register iodevs when setting redist base and creating VCPUs") Signed-off-by: Christoffer Dall Tested-by: Jean-Philippe Brucker Reviewed-by: Eric Auger --- include/kvm/arm_vgic.h | 5 ++++- virt/kvm/arm/vgic/vgic-init.c | 5 ++++- virt/kvm/arm/vgic/vgic-mmio-v3.c | 9 ++++++--- 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 97b8d3728b31..ef718586321c 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -195,7 +195,10 @@ struct vgic_dist { /* either a GICv2 CPU interface */ gpa_t vgic_cpu_base; /* or a number of GICv3 redistributor regions */ - gpa_t vgic_redist_base; + struct { + gpa_t vgic_redist_base; + gpa_t vgic_redist_free_offset; + }; }; /* distributor enabled */ diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index dc68e2e424ab..3a0b8999f011 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -242,8 +242,11 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) * If we are creating a VCPU with a GICv3 we must also register the * KVM io device for the redistributor that belongs to this VCPU. */ - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { + mutex_lock(&vcpu->kvm->lock); ret = vgic_register_redist_iodev(vcpu); + mutex_unlock(&vcpu->kvm->lock); + } return ret; } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 99da1a207c19..9b0f6810e7a8 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -586,7 +586,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) if (!vgic_v3_check_base(kvm)) return -EINVAL; - rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2; + rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset; sgi_base = rd_base + SZ_64K; kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops); @@ -615,11 +615,14 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base, SZ_64K, &sgi_dev->dev); mutex_unlock(&kvm->slots_lock); - if (ret) + if (ret) { kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &rd_dev->dev); + return ret; + } - return ret; + vgic->vgic_redist_free_offset += 2 * SZ_64K; + return 0; } static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 9756d33b852a17ee67539545c2acf8dc3bda4574 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 1 May 2017 23:54:17 -0700 Subject: Bluetooth: Enable LE Channel Selection Algorithm event If the Channel Selection Algorithm #2 feature is supported, then enable the new LE Channel Selection Algorithm event. 
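The shape of the change is a feature-conditional event-mask update. A small standalone sketch of that pattern (the 0x40 feature bit in le_features[1] and the 0x08 event bit are taken from the diff below; the helper name and constant names are made up for illustration):

#include <stdio.h>

#define HCI_LE_CHAN_SEL_ALG2	0x40	/* feature bit, le_features[1] */
#define EVT_LE_CHAN_SEL_ALG	0x08	/* event mask bit, events[2]   */

/*
 * Enable the optional event only when the controller advertises
 * the matching feature bit, following the hci_init3_req() logic
 * in the diff below.
 */
static void build_le_event_mask(const unsigned char *le_features,
				unsigned char *events)
{
	if (le_features[1] & HCI_LE_CHAN_SEL_ALG2)
		events[2] |= EVT_LE_CHAN_SEL_ALG;
}

int main(void)
{
	unsigned char features[2] = { 0x00, HCI_LE_CHAN_SEL_ALG2 };
	unsigned char events[8] = { 0 };

	build_le_event_mask(features, events);
	printf("events[2] = 0x%02x\n", events[2]);
	return 0;
}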
Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci.h | 1 + net/bluetooth/hci_core.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 99aa5e5e3100..dd43cfdd443a 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -399,6 +399,7 @@ enum { #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_EXT_SCAN_POLICY 0x80 +#define HCI_LE_CHAN_SEL_ALG2 0x40 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4a0cac774107..e58b9034afff 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -635,6 +635,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) * Report */ + /* If the controller supports Channel Selection Algorithm #2 + * feature, enable the corresponding event. + */ + if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) + events[2] |= 0x08; /* LE Channel Selection + * Algorithm + */ + /* If the controller supports the LE Set Scan Enable command, * enable the corresponding advertising report event. */ -- cgit v1.2.3 From de2ba3039cfb61334b2523677cc032422873ff93 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 1 May 2017 23:54:19 -0700 Subject: Bluetooth: Set LE Default PHY preferences If the LE Set Default PHY command is supported, then indicate to the controller that the host has no preferences for transmitter PHY or receiver PHY selection. Issuing this command gives the controller a clear indication that other PHYs can be selected if available. Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci.h | 7 +++++++ net/bluetooth/hci_core.c | 12 ++++++++++++ 2 files changed, 19 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index dd43cfdd443a..fe98f0a5bef0 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1499,6 +1499,13 @@ struct hci_rp_le_read_max_data_len { __le16 rx_time; } __packed; +#define HCI_OP_LE_SET_DEFAULT_PHY 0x2031 +struct hci_cp_le_set_default_phy { + __u8 all_phys; + __u8 tx_phys; + __u8 rx_phys; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 88a616a2b959..43fecd59dfef 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -794,6 +794,18 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp); } + /* Set Default PHY parameters if command is supported */ + if (hdev->commands[35] & 0x20) { + struct hci_cp_le_set_default_phy cp; + + /* No transmitter PHY or receiver PHY preferences */ + cp.all_phys = 0x03; + cp.tx_phys = 0; + cp.rx_phys = 0; + + hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); + } + return 0; } -- cgit v1.2.3 From 6a29beef9d1b16c762e469d77e28c3de3f5c3dbb Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Wed, 17 May 2017 18:32:02 +0300 Subject: usb: host: xhci-ring: don't need to clear interrupt pending for MSI enabled hcd According to the xHCI spec, Figure 30 "Interrupt Throttle Flow Diagram": if PCI Message Signaled Interrupts (MSI or MSI-X) are enabled, then the assertion of the Interrupt Pending (IP) flag in Figure 30 generates a PCI Dword write. The IP flag is automatically cleared by the completion of the PCI write.
So HCs with MSI enabled don't need to clear the interrupt pending bit. However, hcd->irq == 0 is not equivalent to an MSI enabled HCD: some dual-role controller software designs set hcd->irq to 0 to keep the HCD from requesting an interrupt, because they want to decide in software when to call usb_hcd_irq(). Signed-off-by: Peter Chen Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-ring.c | 5 +---- drivers/usb/host/xhci.c | 5 +++-- include/linux/usb/hcd.h | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 507ba7734b94..0830b25f9499 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2707,12 +2707,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) */ status |= STS_EINT; writel(status, &xhci->op_regs->status); - /* FIXME when MSI-X is supported and there are multiple vectors */ - /* Clear the MSI-X event interrupt status */ - if (hcd->irq) { + if (!hcd->msi_enabled) { u32 irq_pending; - /* Acknowledge the PCI interrupt */ irq_pending = readl(&xhci->ir_set->irq_pending); irq_pending |= IMAN_IP; writel(irq_pending, &xhci->ir_set->irq_pending); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2d1310220832..71eb2991c698 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -359,9 +359,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) /* fall back to msi*/ ret = xhci_setup_msi(xhci); - if (!ret) - /* hcd->irq is 0, we have MSI */ + if (!ret) { + hcd->msi_enabled = 1; return 0; + } if (!pdev->irq) { xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index a469999a106d..50398b69ca44 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -148,6 +148,7 @@ struct usb_hcd { unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ unsigned msix_enabled:1; /* driver has MSI-X enabled? */ + unsigned msi_enabled:1; /* driver has MSI enabled? */ unsigned remove_phy:1; /* auto-remove USB phy */ /* The next flag is a stopgap, to be removed when all the HCDs -- cgit v1.2.3 From 64df6d525fcff1630098db9238bfd2b3e092d5c1 Mon Sep 17 00:00:00 2001 From: linzhang Date: Wed, 17 May 2017 12:05:07 +0800 Subject: net: x25: fix one potential use-after-free issue The function x25_init() does not properly unregister related resources in its error handling. This will result in a kernel oops if x25_init() fails, so add the proper unregister calls to the error paths. Also adjust the coding style and make x25_register_sysctl() properly return failure. Signed-off-by: linzhang Signed-off-by: David S.
Miller --- include/net/x25.h | 4 ++-- net/x25/af_x25.c | 24 ++++++++++++++++-------- net/x25/sysctl_net_x25.c | 5 ++++- 3 files changed, 22 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/x25.h b/include/net/x25.h index c383aa4edbf0..6d30a01d281d 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *); /* sysctl_net_x25.c */ #ifdef CONFIG_SYSCTL -void x25_register_sysctl(void); +int x25_register_sysctl(void); void x25_unregister_sysctl(void); #else -static inline void x25_register_sysctl(void) {}; +static inline int x25_register_sysctl(void) { return 0; }; static inline void x25_unregister_sysctl(void) {}; #endif /* CONFIG_SYSCTL */ diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 8b911c29860e..5a1a98df3499 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1791,32 +1791,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb) static int __init x25_init(void) { - int rc = proto_register(&x25_proto, 0); + int rc; - if (rc != 0) + rc = proto_register(&x25_proto, 0); + if (rc) goto out; rc = sock_register(&x25_family_ops); - if (rc != 0) + if (rc) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); - if (rc != 0) + if (rc) goto out_sock; - pr_info("Linux Version 0.2\n"); + rc = x25_register_sysctl(); + if (rc) + goto out_dev; - x25_register_sysctl(); rc = x25_proc_init(); - if (rc != 0) - goto out_dev; + if (rc) + goto out_sysctl; + + pr_info("Linux Version 0.2\n"); + out: return rc; +out_sysctl: + x25_unregister_sysctl(); out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: + dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c index a06dfe143c67..ba078c85f0a1 100644 --- a/net/x25/sysctl_net_x25.c +++ b/net/x25/sysctl_net_x25.c @@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = { { }, }; -void __init x25_register_sysctl(void) +int __init x25_register_sysctl(void) { x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); + if (!x25_table_header) + return -ENOMEM; + return 0; } void x25_unregister_sysctl(void) -- cgit v1.2.3 From 197a5212c3dd70be267b5cd930be0fb68bb53018 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 17 May 2017 12:14:37 +0800 Subject: ptr_ring: add ptr_ring_unconsume Applications that consume a batch of entries in one go can benefit from the ability to return some of them back into the ring. Add an API for that - assuming there's space. If there's no space we naturally can't do this and have to drop entries, but that implies the ring is full, so we'd likely drop some anyway. Signed-off-by: Michael S. Tsirkin Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- include/linux/ptr_ring.h | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) (limited to 'include') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6b2e0dd88569..796b90f6d4e9 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -403,6 +403,61 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) return 0; } +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly.
+ * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. + */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. */ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = r->consumer_head = head; + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, int size, gfp_t gfp, void (*destroy)(void *)) -- cgit v1.2.3 From 3acb696015a222f4b25c1b5dce4e36b2d4980da6 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 17 May 2017 12:14:38 +0800 Subject: skb_array: introduce skb_array_unconsume Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- include/linux/skb_array.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index f4dfade428f0..79850b638bf2 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -156,6 +156,12 @@ static void __skb_array_destroy_skb(void *ptr) kfree_skb(ptr); } +static inline void skb_array_unconsume(struct skb_array *a, + struct sk_buff **skbs, int n) +{ + ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); +} + static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) { return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); -- cgit v1.2.3 From 728fc8d5532b956f9c4b48dff0577fb722251343 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 17 May 2017 12:14:39 +0800 Subject: ptr_ring: introduce batch dequeuing This patch introduces a batched version of consuming: the consumer can dequeue more than one pointer from the ring at a time. We don't care about the reordering of reads here, so no compiler barrier is needed. Signed-off-by: Jason Wang Signed-off-by: David S.
Miller --- include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) (limited to 'include') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 796b90f6d4e9..d8c97ec8a8e6 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -278,6 +278,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when @@ -328,6 +344,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) return ptr; } +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. -- cgit v1.2.3 From 3528c1a52e7af001e0e387fcb6bac2bdb3775d3e Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 17 May 2017 12:14:40 +0800 Subject: skb_array: introduce batch dequeuing Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- include/linux/skb_array.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'include') diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index 79850b638bf2..35226cd4efb0 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a) return ptr_ring_consume(&a->ring); } +static inline int skb_array_consume_batched(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched(&a->ring, (void **)array, n); +} + static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) { return ptr_ring_consume_irq(&a->ring); } +static inline int skb_array_consume_batched_irq(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n); +} + static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) { return ptr_ring_consume_any(&a->ring); } +static inline int skb_array_consume_batched_any(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_any(&a->ring, (void **)array, n); +} + + static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a) { return ptr_ring_consume_bh(&a->ring); } +static inline int skb_array_consume_batched_bh(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n); +} + static inline int __skb_array_len_with_tag(struct sk_buff *skb) { if (likely(skb)) { -- cgit v1.2.3 From 83339c6b159ea6429a1db40b0d9d1083ab574733 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 17 May 2017 12:14:41 +0800 Subject: tun: export skb_array This patch exports skb_array through tun_get_skb_array(). Caller can then manipulate skb array directly. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/tun.c | 13 +++++++++++++ include/linux/if_tun.h | 5 +++++ 2 files changed, 18 insertions(+) (limited to 'include') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bbd707b9ef7a..3cbfc5c707e3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2626,6 +2626,19 @@ struct socket *tun_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tun_get_socket); +struct skb_array *tun_get_skb_array(struct file *file) +{ + struct tun_file *tfile; + + if (file->f_op != &tun_fops) + return ERR_PTR(-EINVAL); + tfile = file->private_data; + if (!tfile) + return ERR_PTR(-EBADFD); + return &tfile->tx_array; +} +EXPORT_SYMBOL_GPL(tun_get_skb_array); + module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index ed6da2e6df90..bf9bdf42d577 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -19,6 +19,7 @@ #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); +struct skb_array *tun_get_skb_array(struct file *file); #else #include #include @@ -28,5 +29,9 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } +static inline struct skb_array *tun_get_skb_array(struct file *f) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ -- cgit v1.2.3 From 49f96fd0cb3808e5ff96573f28b3dceb16eb6998 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 17 May 2017 12:14:42 +0800 Subject: tap: export skb_array This patch exports skb_array through tap_get_skb_array(). Caller can then manipulate skb array directly. 
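Both the tun and tap exports follow the same type-checked accessor pattern: validate f_op before trusting private_data. A userspace model of that pattern follows (all types and names here are illustrative stand-ins; the kernel versions return ERR_PTR(-EINVAL) / ERR_PTR(-EBADFD) rather than NULL):

#include <stdio.h>

struct ring { int head, tail; };
struct queue { struct ring r; };
struct file { const void *f_op; void *private_data; };

static const int tap_fops_cookie;	/* stands in for &tap_fops */

/* Only hand out the internal ring if the file belongs to us. */
static struct ring *get_ring(struct file *f)
{
	struct queue *q;

	if (f->f_op != &tap_fops_cookie)
		return NULL;		/* kernel: ERR_PTR(-EINVAL) */
	q = f->private_data;
	if (!q)
		return NULL;		/* kernel: ERR_PTR(-EBADFD) */
	return &q->r;
}

int main(void)
{
	struct queue q = { { 0, 0 } };
	struct file f = { &tap_fops_cookie, &q };

	printf("ring %s\n", get_ring(&f) ? "found" : "not found");
	return 0;
}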
Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/tap.c | 13 +++++++++++++ include/linux/if_tap.h | 5 +++++ 2 files changed, 18 insertions(+) (limited to 'include') diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 4d4173d25dd0..abdaf867774d 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1193,6 +1193,19 @@ struct socket *tap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_socket); +struct skb_array *tap_get_skb_array(struct file *file) +{ + struct tap_queue *q; + + if (file->f_op != &tap_fops) + return ERR_PTR(-EINVAL); + q = file->private_data; + if (!q) + return ERR_PTR(-EBADFD); + return &q->skb_array; +} +EXPORT_SYMBOL_GPL(tap_get_skb_array); + int tap_queue_resize(struct tap_dev *tap) { struct net_device *dev = tap->dev; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3482c3c2037d..4837157da0dc 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -3,6 +3,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); +struct skb_array *tap_get_skb_array(struct file *file); #else #include #include @@ -12,6 +13,10 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } +static inline struct skb_array *tap_get_skb_array(struct file *f) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TAP */ #include -- cgit v1.2.3 From 0cd2950357e31a96be03b531b4b11fe1df812c9f Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 17 May 2017 13:30:44 +0300 Subject: net: make struct net_device::tx_queue_len unsigned int 4 billion packet queue is something unthinkable so use 32-bit value for now. Space savings on x86_64: add/remove: 0/0 grow/shrink: 3/70 up/down: 16/-131 (-115) function old new delta change_tx_queue_len 94 108 +14 qdisc_create 1176 1177 +1 alloc_netdev_mqs 1124 1125 +1 xenvif_alloc 533 532 -1 x25_asy_setup 167 166 -1 ... tun_queue_resize 945 940 -5 pfifo_fast_enqueue 167 162 -5 qfq_init_qdisc 168 158 -10 tap_queue_resize 810 799 -11 transmit 719 698 -21 Signed-off-by: Alexey Dobriyan Signed-off-by: David S. 
Miller --- drivers/net/wan/hdlc_raw_eth.c | 3 ++- include/linux/netdevice.h | 2 +- net/core/net-sysfs.c | 8 ++++++-- net/core/rtnetlink.c | 4 ++-- 4 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 2f11836078ab..8bd3ed905813 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -57,7 +57,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) const size_t size = sizeof(raw_hdlc_proto); raw_hdlc_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); - int result, old_qlen; + unsigned int old_qlen; + int result; switch (ifr->ifr_settings.type) { case IF_GET_PROTO: diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3f39d27decf4..0150b2dd3031 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1824,7 +1824,7 @@ struct net_device { #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif - unsigned long tx_queue_len; + unsigned int tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 65ea0ff4017c..58e6cc70500d 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -323,7 +323,11 @@ NETDEVICE_SHOW_RW(flags, fmt_hex); static int change_tx_queue_len(struct net_device *dev, unsigned long new_len) { - int res, orig_len = dev->tx_queue_len; + unsigned int orig_len = dev->tx_queue_len; + int res; + + if (new_len != (unsigned int)new_len) + return -ERANGE; if (new_len != orig_len) { dev->tx_queue_len = new_len; @@ -349,7 +353,7 @@ static ssize_t tx_queue_len_store(struct device *dev, return netdev_store(dev, attr, buf, len, change_tx_queue_len); } -NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong); +NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec); static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d7f82c3450b1..f759f22af0af 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2046,8 +2046,8 @@ static int do_setlink(const struct sk_buff *skb, } if (tb[IFLA_TXQLEN]) { - unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]); - unsigned long orig_len = dev->tx_queue_len; + unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); + unsigned int orig_len = dev->tx_queue_len; if (dev->tx_queue_len ^ value) { dev->tx_queue_len = value; -- cgit v1.2.3 From c7f235a7c2d09b1b83671ba2d93ebee981554467 Mon Sep 17 00:00:00 2001 From: Holger Brunck Date: Wed, 17 May 2017 17:24:37 +0200 Subject: fsl/qe: add bit description for SYNL register for GUMR Add the bitmasks for the two-bit SYNL field of GUMR according to the QUICC Engine Reference Manual. Signed-off-by: Holger Brunck Cc: Zhao Qiang Signed-off-by: David S.
Miller --- include/soc/fsl/qe/qe.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 0cd4c11479b1..226f915a68c2 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -668,6 +668,10 @@ struct ucc_slow_pram { #define UCC_FAST_GUMR_CTSS 0x00800000 #define UCC_FAST_GUMR_TXSY 0x00020000 #define UCC_FAST_GUMR_RSYN 0x00010000 +#define UCC_FAST_GUMR_SYNL_MASK 0x0000C000 +#define UCC_FAST_GUMR_SYNL_16 0x0000C000 +#define UCC_FAST_GUMR_SYNL_8 0x00008000 +#define UCC_FAST_GUMR_SYNL_AUTO 0x00004000 #define UCC_FAST_GUMR_RTSM 0x00002000 #define UCC_FAST_GUMR_REVD 0x00000400 #define UCC_FAST_GUMR_ENR 0x00000020 -- cgit v1.2.3 From 067bb938dad61e58fc3d6a0e090b72ec011851cd Mon Sep 17 00:00:00 2001 From: Holger Brunck Date: Wed, 17 May 2017 17:24:38 +0200 Subject: net/wan/fsl_ucc_hdlc: add hdlc-bus support This adds support for hdlc-bus mode to the fsl_ucc_hdlc driver. This can be enabled with the "fsl,hdlc-bus" property in the DTS node of the corresponding ucc. This aligns the configuration of the UPSMR and GUMR registers to what is done in our ucc_hdlc driver (that only support hdlc-bus mode) and with the QuickEngine's documentation for hdlc-bus mode. GUMR/SYNL is set to AUTO for the busmode as in this case the CD signal is ignored. The brkpt_support is enabled to set the HBM1 bit in the CMXUCR register to configure an open-drain connected HDLC bus. Signed-off-by: Holger Brunck Cc: Zhao Qiang Signed-off-by: David S. Miller --- drivers/net/wan/fsl_ucc_hdlc.c | 32 ++++++++++++++++++++++++++++++++ drivers/net/wan/fsl_ucc_hdlc.h | 1 + include/soc/fsl/qe/qe.h | 5 +++++ 3 files changed, 38 insertions(+) (limited to 'include') diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 4c93d561b18a..e9b2d687f150 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -98,6 +98,13 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) uf_info->tsa = 1; uf_info->ctsp = 1; } + + /* This sets HPM register in CMXUCR register which configures a + * open drain connected HDLC bus + */ + if (priv->hdlc_bus) + uf_info->brkpt_support = 1; + uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_TXB) << 16); @@ -135,6 +142,28 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) /* Set UPSMR normal mode (need fixed)*/ iowrite32be(0, &priv->uf_regs->upsmr); + /* hdlc_bus mode */ + if (priv->hdlc_bus) { + u32 upsmr; + + dev_info(priv->dev, "HDLC bus Mode\n"); + upsmr = ioread32be(&priv->uf_regs->upsmr); + + /* bus mode and retransmit enable, with collision window + * set to 8 bytes + */ + upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS | + UCC_HDLC_UPSMR_CW8; + iowrite32be(upsmr, &priv->uf_regs->upsmr); + + /* explicitly disable CDS & CTSP */ + gumr = ioread32be(&priv->uf_regs->gumr); + gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP); + /* set automatic sync to explicitly ignore CD signal */ + gumr |= UCC_FAST_GUMR_SYNL_AUTO; + iowrite32be(gumr, &priv->uf_regs->gumr); + } + priv->rx_ring_size = RX_BD_RING_LEN; priv->tx_ring_size = TX_BD_RING_LEN; /* Alloc Rx BD */ @@ -1046,6 +1075,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) uhdlc_priv->loopback = 1; + if (of_get_property(np, "fsl,hdlc-bus", NULL)) + uhdlc_priv->hdlc_bus = 1; + if (uhdlc_priv->tsa == 1) { utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); if (!utdm) { diff --git a/drivers/net/wan/fsl_ucc_hdlc.h 
b/drivers/net/wan/fsl_ucc_hdlc.h index 881ecdeef076..c21134c1f180 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.h +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -78,6 +78,7 @@ struct ucc_hdlc_private { u16 tsa; bool hdlc_busy; bool loopback; + bool hdlc_bus; u8 *tx_buffer; u8 *rx_buffer; diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 226f915a68c2..b3d1aff5e8ad 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -789,6 +789,11 @@ struct ucc_slow_pram { #define UCC_GETH_UPSMR_SMM 0x00000080 #define UCC_GETH_UPSMR_SGMM 0x00000020 +/* UCC Protocol Specific Mode Register (UPSMR), when used for HDLC */ +#define UCC_HDLC_UPSMR_RTE 0x02000000 +#define UCC_HDLC_UPSMR_BUS 0x00200000 +#define UCC_HDLC_UPSMR_CW8 0x00007000 + /* UCC Transmit On Demand Register (UTODR) */ #define UCC_SLOW_TOD 0x8000 #define UCC_FAST_TOD 0x8000 -- cgit v1.2.3 From 667271455f883d23e6418ef3376eb1214da9d066 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 17 May 2017 19:31:39 +0300 Subject: net: make struct dst_entry::dev first member struct dst_entry::dev is used most often. Move it so it can be accessed without imm8 offset on x86_64. add/remove: 0/0 grow/shrink: 9/239 up/down: 52/-413 (-361) function old new delta dst_rcu_free 126 138 +12 fnhe_flush_routes 211 219 +8 rt_set_nexthop 747 754 +7 rt_cache_route 85 91 +6 rt6_release 209 215 +6 dst_release 107 111 +4 dst_destroy_rcu 29 33 +4 dn_dst_check_expire 329 333 +4 dn_insert_route 484 485 +1 xfrm_resolve_and_create_bundle 2991 2990 -1 ... ip_route_me_harder 1163 1157 -6 __ip_append_data.isra 2730 2724 -6 ip6_forward 3052 3045 -7 callforward_do_filter 659 651 -8 dst_gc_task 571 549 -22 Signed-off-by: Alexey Dobriyan Signed-off-by: David S. Miller --- include/net/dst.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/dst.h b/include/net/dst.h index 049af33da3b6..755a0eedffde 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -31,9 +31,9 @@ struct sk_buff; struct dst_entry { + struct net_device *dev; struct rcu_head rcu_head; struct dst_entry *child; - struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; -- cgit v1.2.3 From f0c24ccf491b09de53cee32114c924551218f2bc Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 17 May 2017 15:46:04 -0400 Subject: net: dsa: include switchdev.h only once DSA drivers and core use switchdev. Include switchdev.h only once, in the dsa.h public header, so that inclusion in DSA drivers or forward declarations of switchdev structures in not necessary anymore. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 1 - drivers/net/dsa/bcm_sf2.c | 1 - drivers/net/dsa/dsa_loop.c | 1 - drivers/net/dsa/mt7530.c | 1 - drivers/net/dsa/mv88e6xxx/chip.c | 1 - drivers/net/dsa/qca8k.c | 1 - include/net/dsa.h | 7 +------ net/dsa/slave.c | 1 - 8 files changed, 1 insertion(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 658a12c888a8..fbc3eb17c7a3 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "b53_regs.h" #include "b53_priv.h" diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 215d41c1e71f..687a8bae5d73 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include "bcm_sf2.h" diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index a19e1781e9bb..6afab16d13dd 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include "dsa_loop.h" diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index b070c167e70f..1bcbe15870ed 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -28,7 +28,6 @@ #include #include #include -#include #include "mt7530.h" diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d034d8cd7d22..386d878569ed 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -32,7 +32,6 @@ #include #include #include -#include #include "mv88e6xxx.h" #include "global1.h" diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 942b9ac7f92a..149f109dbffb 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/include/net/dsa.h b/include/net/dsa.h index ed767beca9c6..bf6a2abb9b99 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -20,6 +20,7 @@ #include #include #include +#include struct tc_action; struct phy_device; @@ -284,12 +285,6 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) return ds->rtable[dst->cpu_dp->ds->index]; } -struct switchdev_trans; -struct switchdev_obj; -struct switchdev_obj_port_fdb; -struct switchdev_obj_port_mdb; -struct switchdev_obj_port_vlan; - #define DSA_NOTIFIER_BRIDGE_JOIN 1 #define DSA_NOTIFIER_BRIDGE_LEAVE 2 diff --git a/net/dsa/slave.c b/net/dsa/slave.c index fb13c5d7d587..91236d602301 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 438ff53739ee523de3755a98ae3a290e69752620 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 17 May 2017 15:46:05 -0400 Subject: net: dsa: use switchdev_obj_dump_cb_t everywhere Now that the DSA public header includes switchdev.h, use the provided switchdev_obj_dump_cb_t typedef for the object dump callback. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 6 +++--- drivers/net/dsa/b53/b53_priv.h | 4 ++-- drivers/net/dsa/dsa_loop.c | 2 +- drivers/net/dsa/mt7530.c | 2 +- drivers/net/dsa/mv88e6xxx/chip.c | 10 +++++----- drivers/net/dsa/qca8k.c | 2 +- include/net/dsa.h | 6 +++--- 7 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index fbc3eb17c7a3..fa099ed41652 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1055,7 +1055,7 @@ EXPORT_SYMBOL(b53_vlan_del); int b53_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct b53_device *dev = ds->priv; u16 vid, vid_start = 0, pvid; @@ -1284,7 +1284,7 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx, static int b53_fdb_copy(struct net_device *dev, int port, const struct b53_arl_entry *ent, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { if (!ent->is_valid) return 0; @@ -1301,7 +1301,7 @@ static int b53_fdb_copy(struct net_device *dev, int port, int b53_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct b53_device *priv = ds->priv; struct net_device *dev = ds->ports[port].netdev; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index a9dc90a01438..155a9c48c317 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -395,7 +395,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); int b53_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); int b53_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans); @@ -406,7 +406,7 @@ int b53_fdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb); int b53_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); void b53_mirror_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 6afab16d13dd..5edf07beb9d2 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -187,7 +187,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct dsa_loop_priv *ps = ds->priv; struct mii_bus *bus = ps->bus; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1bcbe15870ed..4d2f45153ede 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -853,7 +853,7 @@ mt7530_port_fdb_del(struct dsa_switch *ds, int port, static int mt7530_port_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct mt7530_priv *priv = ds->priv; struct mt7530_fdb _fdb = { 0 }; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c 
b/drivers/net/dsa/mv88e6xxx/chip.c index 386d878569ed..41de250dbcc3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1268,7 +1268,7 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_vtu_entry next = { @@ -1699,7 +1699,7 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, u16 fid, u16 vid, int port, struct switchdev_obj *obj, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct mv88e6xxx_atu_entry addr; int err; @@ -1754,7 +1754,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, struct switchdev_obj *obj, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct mv88e6xxx_vtu_entry vlan = { .vid = chip->info->max_vid, @@ -1791,7 +1791,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -4037,7 +4037,7 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_mdb *mdb, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct mv88e6xxx_chip *chip = ds->priv; int err; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 149f109dbffb..0f6a011d8ed1 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -872,7 +872,7 @@ qca8k_port_fdb_del(struct dsa_switch *ds, int port, static int qca8k_port_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) + switchdev_obj_dump_cb_t *cb) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; struct qca8k_fdb _fdb = { 0 }; diff --git a/include/net/dsa.h b/include/net/dsa.h index bf6a2abb9b99..791fed62fb16 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -404,7 +404,7 @@ struct dsa_switch_ops { const struct switchdev_obj_port_vlan *vlan); int (*port_vlan_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); /* * Forwarding database @@ -419,7 +419,7 @@ struct dsa_switch_ops { const struct switchdev_obj_port_fdb *fdb); int (*port_fdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); /* * Multicast database @@ -434,7 +434,7 @@ struct dsa_switch_ops { const struct switchdev_obj_port_mdb *mdb); int (*port_mdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_mdb *mdb, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); /* * RXNFC -- cgit v1.2.3 From 6bdc00d01e202ae11fa1cae0dacbef895434483d Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Fri, 28 Apr 2017 13:47:21 +0200 Subject: serdev: Restore serdev_device_write_buf for atomic context Starting with commit 6fe729c4bdae ("serdev: Add serdev_device_write subroutine") the function 
serdev_device_write_buf cannot be used in atomic context anymore (mutex_lock is sleeping). So restore the old behavior. Signed-off-by: Stefan Wahren Fixes: 6fe729c4bdae ("serdev: Add serdev_device_write subroutine") Acked-by: Rob Herring Reviewed-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/core.c | 12 ++++++++++++ include/linux/serdev.h | 14 +++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 433de5ea9b02..f71b47334149 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev) } EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); +int serdev_device_write_buf(struct serdev_device *serdev, + const unsigned char *buf, size_t count) +{ + struct serdev_controller *ctrl = serdev->ctrl; + + if (!ctrl || !ctrl->ops->write_buf) + return -EINVAL; + + return ctrl->ops->write_buf(ctrl, buf, count); +} +EXPORT_SYMBOL_GPL(serdev_device_write_buf); + int serdev_device_write(struct serdev_device *serdev, const unsigned char *buf, size_t count, unsigned long timeout) diff --git a/include/linux/serdev.h b/include/linux/serdev.h index cda76c6506ca..e2a225bf716d 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *); void serdev_device_close(struct serdev_device *); unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); void serdev_device_set_flow_control(struct serdev_device *, bool); +int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); void serdev_device_wait_until_sent(struct serdev_device *, long); int serdev_device_get_tiocm(struct serdev_device *); int serdev_device_set_tiocm(struct serdev_device *, int, int); @@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev return 0; } static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} +static inline int serdev_device_write_buf(struct serdev_device *serdev, + const unsigned char *buf, + size_t count) +{ + return -ENODEV; +} static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} static inline int serdev_device_get_tiocm(struct serdev_device *serdev) { @@ -312,11 +319,4 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port, static inline void serdev_tty_port_unregister(struct tty_port *port) {} #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ -static inline int serdev_device_write_buf(struct serdev_device *serdev, - const unsigned char *data, - size_t count) -{ - return serdev_device_write(serdev, data, count, 0); -} - #endif /*_LINUX_SERDEV_H */ -- cgit v1.2.3 From 8cde11b2baa1d02eb2eb955dfd47d9f2a12f12cf Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 18 May 2017 17:33:00 +0200 Subject: tty/serdev: add serdev registration interface Add a new interface for registering a serdev controller and clients, and a helper function to deregister serdev devices (or a tty device) that were previously registered using the new interface. Once every driver currently using the tty_port_register_device() helpers have been vetted and converted to use the new serdev registration interface (at least for deregistration), we can move serdev registration to the current helpers and get rid of the serdev-specific functions. 
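For drivers being converted, the intended usage is roughly the following (a minimal sketch: the my_uart_* names, the platform-device wiring, and index 0 are hypothetical; only tty_port_register_device_serdev() and tty_port_unregister_device() come from this patch):

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/tty.h>

	static struct tty_port my_port;		/* hypothetical driver state */
	static struct tty_driver *my_driver;

	static int my_uart_probe(struct platform_device *pdev)
	{
		struct device *dev;

		tty_port_init(&my_port);

		/*
		 * Registers a serdev controller when the parent device
		 * has serdev clients described, and falls back to
		 * registering a plain tty device otherwise.
		 */
		dev = tty_port_register_device_serdev(&my_port, my_driver,
						      0, &pdev->dev);
		if (IS_ERR(dev))
			return PTR_ERR(dev);

		return 0;
	}

	static int my_uart_remove(struct platform_device *pdev)
	{
		/* Tears down whichever device type probe() registered. */
		tty_port_unregister_device(&my_port, my_driver, 0);
		return 0;
	}

Because registration can resolve to either a serdev controller or a tty character device, deregistration has to go through the shared tty_port_unregister_device() helper rather than calling tty_unregister_device() directly.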
Reviewed-by: Rob Herring Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/serdev-ttyport.c | 6 ++- drivers/tty/tty_port.c | 75 +++++++++++++++++++++++++++++++++++++ include/linux/serdev.h | 7 +++- include/linux/tty.h | 9 +++++ 4 files changed, 93 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 013efffd2e82..d0a021c93986 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -250,16 +250,18 @@ err_reset_data: return ERR_PTR(ret); } -void serdev_tty_port_unregister(struct tty_port *port) +int serdev_tty_port_unregister(struct tty_port *port) { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); if (!serport) - return; + return -ENODEV; serdev_controller_remove(ctrl); port->client_ops = NULL; port->client_data = NULL; serdev_controller_put(ctrl); + + return 0; } diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 88dac3b79369..4fb3165384c4 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -16,6 +16,7 @@ #include #include #include +#include static int tty_port_default_receive_buf(struct tty_port *port, const unsigned char *p, @@ -136,6 +137,80 @@ struct device *tty_port_register_device_attr(struct tty_port *port, } EXPORT_SYMBOL_GPL(tty_port_register_device_attr); +/** + * tty_port_register_device_attr_serdev - register tty or serdev device + * @port: tty_port of the device + * @driver: tty_driver for this device + * @index: index of the tty + * @device: parent if exists, otherwise NULL + * @drvdata: driver data for the device + * @attr_grp: attribute group for the device + * + * Register a serdev or tty device depending on if the parent device has any + * defined serdev clients or not. + */ +struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp) +{ + struct device *dev; + + tty_port_link_device(port, driver, index); + + dev = serdev_tty_port_register(port, device, driver, index); + if (PTR_ERR(dev) != -ENODEV) { + /* Skip creating cdev if we registered a serdev device */ + return dev; + } + + return tty_register_device_attr(driver, index, device, drvdata, + attr_grp); +} +EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev); + +/** + * tty_port_register_device_serdev - register tty or serdev device + * @port: tty_port of the device + * @driver: tty_driver for this device + * @index: index of the tty + * @device: parent if exists, otherwise NULL + * + * Register a serdev or tty device depending on if the parent device has any + * defined serdev clients or not. + */ +struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device) +{ + return tty_port_register_device_attr_serdev(port, driver, index, + device, NULL, NULL); +} +EXPORT_SYMBOL_GPL(tty_port_register_device_serdev); + +/** + * tty_port_unregister_device - deregister a tty or serdev device + * @port: tty_port of the device + * @driver: tty_driver for this device + * @index: index of the tty + * + * If a tty or serdev device is registered with a call to + * tty_port_register_device_serdev() then this function must be called when + * the device is gone. 
+ */ +void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index) +{ + int ret; + + ret = serdev_tty_port_unregister(port); + if (ret == 0) + return; + + tty_unregister_device(driver, index); +} +EXPORT_SYMBOL_GPL(tty_port_unregister_device); + int tty_port_alloc_xmit_buf(struct tty_port *port) { /* We may sleep in get_zeroed_page() */ diff --git a/include/linux/serdev.h b/include/linux/serdev.h index e2a225bf716d..e69402d4a8ae 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -308,7 +308,7 @@ struct tty_driver; struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx); -void serdev_tty_port_unregister(struct tty_port *port); +int serdev_tty_port_unregister(struct tty_port *port); #else static inline struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, @@ -316,7 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port, { return ERR_PTR(-ENODEV); } -static inline void serdev_tty_port_unregister(struct tty_port *port) {} +static inline int serdev_tty_port_unregister(struct tty_port *port) +{ + return -ENODEV; +} #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ #endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/tty.h b/include/linux/tty.h index d07cd2105a6c..eccb4ec30a8a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp); +extern struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +extern void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index); extern int tty_port_alloc_xmit_buf(struct tty_port *port); extern void tty_port_free_xmit_buf(struct tty_port *port); extern void tty_port_destroy(struct tty_port *port); -- cgit v1.2.3 From 7b6859fbdcc4a590c8ef03bcc00d770b42d41c42 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Thu, 18 May 2017 19:41:04 +0300 Subject: qed: Utilize FW 8.20.0.0 This pushes qed [and as result, all qed* drivers] into using 8.20.0.0 firmware. The changes are mostly contained in qed with minor changes to qedi due to some HSI changes. Content-wise, the firmware contains fixes to various issues exposed since the release of the previous firmware, including: - Corrects iSCSI fast retransmit when data digest is enabled. - Stop draining packets when receiving several consecutive PFCs. - Prevent possible assertion when consecutively opening/closing many connections. - Prevent possible assertion due to too long BDQ fetch time. In addition, the new firmware would allow us to later add iWARP support in qed and qedr. Changes from previous version ----------------------------- - V2: Fix warning in qed_debug.c Signed-off-by: Chad Dupuis Signed-off-by: Ram Amrani Signed-off-by: Tomer Tayar Signed-off-by: Manish Rangankar Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 11 +- drivers/net/ethernet/qlogic/qed/qed_debug.c | 3332 +++++++++-------- drivers/net/ethernet/qlogic/qed/qed_debug.h | 3 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3739 +++++++++++++------- .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 267 +- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 3 - drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 186 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 4 - drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 23 +- drivers/scsi/qedi/qedi_fw.c | 20 +- drivers/scsi/qedi/qedi_fw_api.c | 3 +- drivers/scsi/qedi/qedi_iscsi.c | 3 - include/linux/qed/common_hsi.h | 209 +- include/linux/qed/eth_common.h | 3 +- include/linux/qed/fcoe_common.h | 1 - include/linux/qed/iscsi_common.h | 91 +- include/linux/qed/rdma_common.h | 2 +- include/linux/qed/roce_common.h | 2 + include/linux/qed/tcp_common.h | 5 +- 20 files changed, 4841 insertions(+), 3068 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 2ab1aab7c3fe..162cd7ff9a69 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -54,7 +54,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_MAJOR_VERSION 8 #define QED_MINOR_VERSION 10 -#define QED_REVISION_VERSION 10 +#define QED_REVISION_VERSION 11 #define QED_ENGINEERING_VERSION 21 #define QED_VERSION \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d883ad5bec6d..b7ca0e2181c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -944,17 +944,18 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, p_dest->pf_id = p_src->pf_id; update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; - p_dest->update_fcoe_dcb_data_flag = update_flag; + p_dest->update_fcoe_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update; - p_dest->update_roce_dcb_data_flag = update_flag; + p_dest->update_roce_dcb_data_mode = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update; - p_dest->update_roce_dcb_data_flag = update_flag; + p_dest->update_rroce_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update; - p_dest->update_iscsi_dcb_data_flag = update_flag; + p_dest->update_iscsi_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; - p_dest->update_eth_dcb_data_flag = update_flag; + p_dest->update_eth_dcb_data_mode = update_flag; p_dcb_data = &p_dest->fcoe_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 483241b4b05d..87a1389fb4a8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -15,13 +15,6 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" -/* Chip IDs enum */ -enum chip_ids { - CHIP_BB_B0, - CHIP_K2, - MAX_CHIP_IDS -}; - /* Memory groups enum */ enum mem_groups { MEM_GROUP_PXP_MEM, @@ -33,7 +26,6 @@ enum mem_groups { MEM_GROUP_BRB_MEM, MEM_GROUP_PRS_MEM, MEM_GROUP_SDM_MEM, - MEM_GROUP_PBUF, MEM_GROUP_IOR, MEM_GROUP_RAM, MEM_GROUP_BTB_RAM, @@ -45,6 +37,7 @@ enum mem_groups { MEM_GROUP_CAU_PI, MEM_GROUP_CAU_MEM, MEM_GROUP_PXP_ILT, + MEM_GROUP_PBUF, MEM_GROUP_MULD_MEM, MEM_GROUP_BTB_MEM, MEM_GROUP_IGU_MEM, @@ -66,7 +59,6 @@ 
static const char * const s_mem_group_names[] = { "BRB_MEM", "PRS_MEM", "SDM_MEM", - "PBUF", "IOR", "RAM", "BTB_RAM", @@ -78,6 +70,7 @@ static const char * const s_mem_group_names[] = { "CAU_PI", "CAU_MEM", "PXP_ILT", + "PBUF", "MULD_MEM", "BTB_MEM", "IGU_MEM", @@ -88,48 +81,59 @@ static const char * const s_mem_group_names[] = { }; /* Idle check conditions */ -static u32 cond4(const u32 *r, const u32 *imm) + +static u32 cond5(const u32 *r, const u32 *imm) { return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]); } -static u32 cond6(const u32 *r, const u32 *imm) +static u32 cond7(const u32 *r, const u32 *imm) { return ((r[0] >> imm[0]) & imm[1]) != imm[2]; } -static u32 cond5(const u32 *r, const u32 *imm) +static u32 cond14(const u32 *r, const u32 *imm) +{ + return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]); +} + +static u32 cond6(const u32 *r, const u32 *imm) { return (r[0] & imm[0]) != imm[1]; } -static u32 cond8(const u32 *r, const u32 *imm) +static u32 cond9(const u32 *r, const u32 *imm) { return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5])); } -static u32 cond9(const u32 *r, const u32 *imm) +static u32 cond10(const u32 *r, const u32 *imm) { return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]); } -static u32 cond1(const u32 *r, const u32 *imm) +static u32 cond4(const u32 *r, const u32 *imm) { return (r[0] & ~imm[0]) != imm[1]; } static u32 cond0(const u32 *r, const u32 *imm) +{ + return (r[0] & ~r[1]) != imm[0]; +} + +static u32 cond1(const u32 *r, const u32 *imm) { return r[0] != imm[0]; } -static u32 cond10(const u32 *r, const u32 *imm) +static u32 cond11(const u32 *r, const u32 *imm) { return r[0] != r[1] && r[2] == imm[0]; } -static u32 cond11(const u32 *r, const u32 *imm) +static u32 cond12(const u32 *r, const u32 *imm) { return r[0] != r[1] && r[2] > imm[0]; } @@ -139,12 +143,12 @@ static u32 cond3(const u32 *r, const u32 *imm) return r[0] != r[1]; } -static u32 cond12(const u32 *r, const u32 *imm) +static u32 cond13(const u32 *r, const u32 *imm) { return r[0] & imm[0]; } -static u32 cond7(const u32 *r, const u32 *imm) +static u32 cond8(const u32 *r, const u32 *imm) { return r[0] < (r[1] - imm[0]); } @@ -169,6 +173,8 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond10, cond11, cond12, + cond13, + cond14, }; /******************************* Data Types **********************************/ @@ -181,11 +187,6 @@ enum platform_ids { MAX_PLATFORM_IDS }; -struct dbg_array { - const u32 *ptr; - u32 size_in_dwords; -}; - struct chip_platform_defs { u8 num_ports; u8 num_pfs; @@ -204,7 +205,9 @@ struct platform_defs { u32 delay_factor; }; -/* Storm constant definitions */ +/* Storm constant definitions. + * Addresses are in bytes, sizes are in quad-regs. 
+ */ struct storm_defs { char letter; enum block_id block_id; @@ -218,13 +221,13 @@ struct storm_defs { u32 sem_sync_dbg_empty_addr; u32 sem_slow_dbg_empty_addr; u32 cm_ctx_wr_addr; - u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */ + u32 cm_conn_ag_ctx_lid_size; u32 cm_conn_ag_ctx_rd_addr; - u32 cm_conn_st_ctx_lid_size; /* In quad-regs */ + u32 cm_conn_st_ctx_lid_size; u32 cm_conn_st_ctx_rd_addr; - u32 cm_task_ag_ctx_lid_size; /* In quad-regs */ + u32 cm_task_ag_ctx_lid_size; u32 cm_task_ag_ctx_rd_addr; - u32 cm_task_st_ctx_lid_size; /* In quad-regs */ + u32 cm_task_st_ctx_lid_size; u32 cm_task_st_ctx_rd_addr; }; @@ -233,17 +236,23 @@ struct block_defs { const char *name; bool has_dbg_bus[MAX_CHIP_IDS]; bool associated_to_storm; - u32 storm_id; /* Valid only if associated_to_storm is true */ + + /* Valid only if associated_to_storm is true */ + u32 storm_id; enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS]; u32 dbg_select_addr; - u32 dbg_cycle_enable_addr; + u32 dbg_enable_addr; u32 dbg_shift_addr; u32 dbg_force_valid_addr; u32 dbg_force_frame_addr; bool has_reset_bit; - bool unreset; /* If true, the block is taken out of reset before dump */ + + /* If true, block is taken out of reset before dump */ + bool unreset; enum dbg_reset_regs reset_reg; - u8 reset_bit_offset; /* Bit offset in reset register */ + + /* Bit offset in reset register */ + u8 reset_bit_offset; }; /* Reset register definitions */ @@ -262,12 +271,13 @@ struct grc_param_defs { u32 crash_preset_val; }; +/* Address is in 128b units. Width is in bits. */ struct rss_mem_defs { const char *mem_name; const char *type_name; - u32 addr; /* In 128b units */ + u32 addr; u32 num_entries[MAX_CHIP_IDS]; - u32 entry_width[MAX_CHIP_IDS]; /* In bits */ + u32 entry_width[MAX_CHIP_IDS]; }; struct vfc_ram_defs { @@ -289,10 +299,20 @@ struct big_ram_defs { struct phy_defs { const char *phy_name; + + /* PHY base GRC address */ u32 base_addr; + + /* Relative address of indirect TBUS address register (bits 0..7) */ u32 tbus_addr_lo_addr; + + /* Relative address of indirect TBUS address register (bits 8..10) */ u32 tbus_addr_hi_addr; + + /* Relative address of indirect TBUS data register (bits 0..7) */ u32 tbus_data_lo_addr; + + /* Relative address of indirect TBUS data register (bits 8..11) */ u32 tbus_data_hi_addr; }; @@ -300,9 +320,11 @@ struct phy_defs { #define MAX_LCIDS 320 #define MAX_LTIDS 320 + #define NUM_IOR_SETS 2 #define IORS_PER_SET 176 #define IOR_SET_OFFSET(set_id) ((set_id) * 256) + #define BYTES_IN_DWORD sizeof(u32) /* In the macros below, size and offset are specified in bits */ @@ -315,6 +337,7 @@ struct phy_defs { #define FIELD_BIT_MASK(type, field) \ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \ FIELD_DWORD_SHIFT(type, field)) + #define SET_VAR_FIELD(var, type, field, val) \ do { \ var[FIELD_DWORD_OFFSET(type, field)] &= \ @@ -322,31 +345,51 @@ struct phy_defs { var[FIELD_DWORD_OFFSET(type, field)] |= \ (val) << FIELD_DWORD_SHIFT(type, field); \ } while (0) + #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ do { \ for (i = 0; i < (arr_size); i++) \ qed_wr(dev, ptt, addr, (arr)[i]); \ } while (0) + #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \ do { \ for (i = 0; i < (arr_size); i++) \ (arr)[i] = qed_rd(dev, ptt, addr); \ } while (0) +#ifndef DWORDS_TO_BYTES #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD) +#endif +#ifndef BYTES_TO_DWORDS #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) +#endif + +/* extra lines include a signature line + optional latency events line */ +#ifndef NUM_DBG_LINES 
+#define NUM_EXTRA_DBG_LINES(block_desc) \ + (1 + ((block_desc)->has_latency_events ? 1 : 0)) +#define NUM_DBG_LINES(block_desc) \ + ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc)) +#endif + #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2) #define RAM_LINES_TO_BYTES(lines) \ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines)) + #define REG_DUMP_LEN_SHIFT 24 #define MEM_DUMP_ENTRY_SIZE_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem)) + #define IDLE_CHK_RULE_SIZE_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule)) + #define IDLE_CHK_RESULT_HDR_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr)) + #define IDLE_CHK_RESULT_REG_HDR_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr)) + #define IDLE_CHK_MAX_ENTRIES_SIZE 32 /* The sizes and offsets below are specified in bits */ @@ -363,62 +406,92 @@ struct phy_defs { #define VFC_RAM_ADDR_ROW_OFFSET 2 #define VFC_RAM_ADDR_ROW_SIZE 10 #define VFC_RAM_RESP_STRUCT_SIZE 256 + #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE) #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE) #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE) #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE) #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE) + #define NUM_VFC_RAM_TYPES 4 + #define VFC_CAM_NUM_ROWS 512 + #define VFC_OPCODE_CAM_RD 14 #define VFC_OPCODE_RAM_RD 0 + #define NUM_RSS_MEM_TYPES 5 + #define NUM_BIG_RAM_TYPES 3 #define BIG_RAM_BLOCK_SIZE_BYTES 128 #define BIG_RAM_BLOCK_SIZE_DWORDS \ BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES) + #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) + #define RESET_REG_UNRESET_OFFSET 4 + #define STALL_DELAY_MS 500 + #define STATIC_DEBUG_LINE_DWORDS 9 -#define NUM_DBG_BUS_LINES 256 + #define NUM_COMMON_GLOBAL_PARAMS 8 + #define FW_IMG_MAIN 1 -#define REG_FIFO_DEPTH_ELEMENTS 32 + +#ifndef REG_FIFO_ELEMENT_DWORDS #define REG_FIFO_ELEMENT_DWORDS 2 +#endif +#define REG_FIFO_DEPTH_ELEMENTS 32 #define REG_FIFO_DEPTH_DWORDS \ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS) -#define IGU_FIFO_DEPTH_ELEMENTS 64 + +#ifndef IGU_FIFO_ELEMENT_DWORDS #define IGU_FIFO_ELEMENT_DWORDS 4 +#endif +#define IGU_FIFO_DEPTH_ELEMENTS 64 #define IGU_FIFO_DEPTH_DWORDS \ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS) -#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20 + +#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2 +#endif +#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \ PROTECTION_OVERRIDE_ELEMENT_DWORDS) + #define MCP_SPAD_TRACE_OFFSIZE_ADDR \ (MCP_REG_SCRATCH + \ offsetof(struct static_init, sections[SPAD_SECTION_TRACE])) -#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa + #define EMPTY_FW_VERSION_STR "???_???_???_???" #define EMPTY_FW_IMAGE_STR "???????????????" 
/***************************** Constant Arrays *******************************/ +struct dbg_array { + const u32 *ptr; + u32 size_in_dwords; +}; + /* Debug arrays */ -static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} }; +static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - { "bb_b0", - { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0}, - {0, 0, 0}, {0, 0, 0} } }, - { "k2", - { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0}, - {0, 0, 0}, {0, 0, 0} } } + { "bb", + {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, + {0, 0, 0}, + {0, 0, 0}, + {0, 0, 0} } }, + { "ah", + {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, + {0, 0, 0}, + {0, 0, 0}, + {0, 0, 0} } } }; /* Storm constant definitions array */ @@ -427,69 +500,74 @@ static struct storm_defs s_storm_defs[] = { {'T', BLOCK_TSEM, {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, - TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, - TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, - TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY, + TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2, + TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2, + TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2, TCM_REG_CTX_RBC_ACCS, 4, TCM_REG_AGG_CON_CTX, 16, TCM_REG_SM_CON_CTX, 2, TCM_REG_AGG_TASK_CTX, 4, TCM_REG_SM_TASK_CTX}, + /* Mstorm */ {'M', BLOCK_MSEM, {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, MSEM_REG_FAST_MEMORY, - MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE, - MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG, - MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY, + MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2, + MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2, + MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2, MCM_REG_CTX_RBC_ACCS, 1, MCM_REG_AGG_CON_CTX, 10, MCM_REG_SM_CON_CTX, 2, MCM_REG_AGG_TASK_CTX, 7, MCM_REG_SM_TASK_CTX}, + /* Ustorm */ {'U', BLOCK_USEM, {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, - USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE, - USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG, - USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY, + USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2, + USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2, + USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2, UCM_REG_CTX_RBC_ACCS, 2, UCM_REG_AGG_CON_CTX, 13, UCM_REG_SM_CON_CTX, 3, UCM_REG_AGG_TASK_CTX, 3, UCM_REG_SM_TASK_CTX}, + /* Xstorm */ {'X', BLOCK_XSEM, {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, - XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE, - XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG, - XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY, + XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2, + XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2, + XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2, XCM_REG_CTX_RBC_ACCS, 9, XCM_REG_AGG_CON_CTX, 15, XCM_REG_SM_CON_CTX, 0, 0, 0, 0}, + /* Ystorm */ {'Y', BLOCK_YSEM, {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, - YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE, - YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG, - YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY, + YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2, + YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2, + 
YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2, YCM_REG_CTX_RBC_ACCS, 2, YCM_REG_AGG_CON_CTX, 3, YCM_REG_SM_CON_CTX, 2, YCM_REG_AGG_TASK_CTX, 12, YCM_REG_SM_TASK_CTX}, + /* Pstorm */ {'P', BLOCK_PSEM, {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, - PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE, - PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG, - PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY, + PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, + PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2, + PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2, PCM_REG_CTX_RBC_ACCS, 0, 0, 10, PCM_REG_SM_CON_CTX, @@ -498,6 +576,7 @@ static struct storm_defs s_storm_defs[] = { }; /* Block definitions array */ + static struct block_defs block_grc_defs = { "grc", {true, true}, false, 0, @@ -587,9 +666,11 @@ static struct block_defs block_pcie_defs = { "pcie", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE, - PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID, - PCIE_REG_DBG_COMMON_FORCE_FRAME, + PCIE_REG_DBG_COMMON_SELECT_K2, + PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2, + PCIE_REG_DBG_COMMON_SHIFT_K2, + PCIE_REG_DBG_COMMON_FORCE_VALID_K2, + PCIE_REG_DBG_COMMON_FORCE_FRAME_K2, false, false, MAX_DBG_RESET_REGS, 0 }; @@ -691,9 +772,9 @@ static struct block_defs block_pglcs_defs = { "pglcs", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE, - PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID, - PGLCS_REG_DBG_FORCE_FRAME, + PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2, + PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2, + PGLCS_REG_DBG_FORCE_FRAME_K2, true, false, DBG_RESET_REG_MISCS_PL_HV, 2 }; @@ -991,10 +1072,11 @@ static struct block_defs block_yuld_defs = { "yuld", {true, true}, false, 0, {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, - YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE, - YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID, - YULD_REG_DBG_FORCE_FRAME, - true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 + YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2, + YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2, + YULD_REG_DBG_FORCE_FRAME_BB_K2, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, + 15 }; static struct block_defs block_xyld_defs = { @@ -1143,9 +1225,9 @@ static struct block_defs block_umac_defs = { "umac", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE, - UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID, - UMAC_REG_DBG_FORCE_FRAME, + UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2, + UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2, + UMAC_REG_DBG_FORCE_FRAME_K2, true, false, DBG_RESET_REG_MISCS_PL_HV, 6 }; @@ -1177,9 +1259,9 @@ static struct block_defs block_wol_defs = { "wol", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE, - WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID, - WOL_REG_DBG_FORCE_FRAME, + WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2, + WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2, + WOL_REG_DBG_FORCE_FRAME_K2, true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 }; @@ -1187,9 +1269,9 @@ static struct block_defs block_bmbn_defs = { "bmbn", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB}, - BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE, - BMBN_REG_DBG_SHIFT, 
BMBN_REG_DBG_FORCE_VALID, - BMBN_REG_DBG_FORCE_FRAME, + BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2, + BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2, + BMBN_REG_DBG_FORCE_FRAME_K2, false, false, MAX_DBG_RESET_REGS, 0 }; @@ -1204,9 +1286,9 @@ static struct block_defs block_nwm_defs = { "nwm", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE, - NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID, - NWM_REG_DBG_FORCE_FRAME, + NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2, + NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2, + NWM_REG_DBG_FORCE_FRAME_K2, true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 }; @@ -1214,9 +1296,9 @@ static struct block_defs block_nws_defs = { "nws", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE, - NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID, - NWS_REG_DBG_FORCE_FRAME, + NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2, + NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2, + NWS_REG_DBG_FORCE_FRAME_K2, true, false, DBG_RESET_REG_MISCS_PL_HV, 12 }; @@ -1224,9 +1306,9 @@ static struct block_defs block_ms_defs = { "ms", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE, - MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID, - MS_REG_DBG_FORCE_FRAME, + MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2, + MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2, + MS_REG_DBG_FORCE_FRAME_K2, true, false, DBG_RESET_REG_MISCS_PL_HV, 13 }; @@ -1234,9 +1316,11 @@ static struct block_defs block_phy_pcie_defs = { "phy_pcie", {false, true}, false, 0, {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE, - PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID, - PCIE_REG_DBG_COMMON_FORCE_FRAME, + PCIE_REG_DBG_COMMON_SELECT_K2, + PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2, + PCIE_REG_DBG_COMMON_SHIFT_K2, + PCIE_REG_DBG_COMMON_FORCE_VALID_K2, + PCIE_REG_DBG_COMMON_FORCE_FRAME_K2, false, false, MAX_DBG_RESET_REGS, 0 }; @@ -1261,6 +1345,13 @@ static struct block_defs block_rgfs_defs = { false, false, MAX_DBG_RESET_REGS, 0 }; +static struct block_defs block_rgsrc_defs = { + "rgsrc", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + false, false, MAX_DBG_RESET_REGS, 0 +}; + static struct block_defs block_tgfs_defs = { "tgfs", {false, false}, false, 0, {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, @@ -1268,6 +1359,13 @@ static struct block_defs block_tgfs_defs = { false, false, MAX_DBG_RESET_REGS, 0 }; +static struct block_defs block_tgsrc_defs = { + "tgsrc", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + false, false, MAX_DBG_RESET_REGS, 0 +}; + static struct block_defs block_ptld_defs = { "ptld", {false, false}, false, 0, {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, @@ -1350,6 +1448,8 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { &block_muld_defs, &block_yuld_defs, &block_xyld_defs, + &block_ptld_defs, + &block_ypld_defs, &block_prm_defs, &block_pbf_pb1_defs, &block_pbf_pb2_defs, @@ -1363,6 +1463,10 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { &block_tcfc_defs, &block_igu_defs, &block_cau_defs, + &block_rgfs_defs, + &block_rgsrc_defs, + &block_tgfs_defs, + &block_tgsrc_defs, &block_umac_defs, &block_xmac_defs, &block_dbg_defs, @@ -1376,10 +1480,6 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { &block_phy_pcie_defs, &block_led_defs, 
&block_avs_wrap_defs, - &block_rgfs_defs, - &block_tgfs_defs, - &block_ptld_defs, - &block_ypld_defs, &block_misc_aeu_defs, &block_bar0_map_defs, }; @@ -1392,66 +1492,151 @@ static struct platform_defs s_platform_defs[] = { }; static struct grc_param_defs s_grc_param_defs[] = { - {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ - {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ - {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ - {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ - {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */ - {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ - {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ - {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */ + /* DBG_GRC_PARAM_DUMP_TSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, + + /* DBG_GRC_PARAM_DUMP_MSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, + + /* DBG_GRC_PARAM_DUMP_USTORM */ + {{1, 1}, 0, 1, false, 1, 1}, + + /* DBG_GRC_PARAM_DUMP_XSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, + + /* DBG_GRC_PARAM_DUMP_YSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, + + /* DBG_GRC_PARAM_DUMP_PSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, + + /* DBG_GRC_PARAM_DUMP_REGS */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_RAM */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_PBUF */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_IOR */ + {{0, 0}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_VFC */ + {{0, 0}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_CM_CTX */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_ILT */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_RSS */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_CAU */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_QM */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_MCP */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_RESERVED */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_CFC */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_IGU */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_BRB 
*/ + {{0, 0}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_BTB */ + {{0, 0}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_BMB */ + {{0, 0}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_NIG */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_MULD */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_PRS */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_DMAE */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_TM */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_SDM */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_DIF */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_STATIC */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_UNSTALL */ + {{0, 0}, 0, 1, false, 0, 0}, + + /* DBG_GRC_PARAM_NUM_LCIDS */ {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, - MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */ + MAX_LCIDS}, + + /* DBG_GRC_PARAM_NUM_LTIDS */ {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, - MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */ - {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ - {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */ - {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */ - {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */ - {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */ - {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */ + MAX_LTIDS}, + + /* DBG_GRC_PARAM_EXCLUDE_ALL */ + {{0, 0}, 0, 1, true, 0, 0}, + + /* DBG_GRC_PARAM_CRASH */ + {{0, 0}, 0, 1, true, 0, 0}, + + /* DBG_GRC_PARAM_PARITY_SAFE */ + {{0, 0}, 0, 1, false, 1, 0}, + + /* DBG_GRC_PARAM_DUMP_CM */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_DUMP_PHY */ + {{1, 1}, 0, 1, false, 0, 1}, + + /* DBG_GRC_PARAM_NO_MCP */ + {{0, 0}, 0, 1, false, 0, 0}, + + /* DBG_GRC_PARAM_NO_FW_VER */ + {{0, 0}, 0, 1, false, 0, 0} }; static struct rss_mem_defs s_rss_mem_defs[] = { { "rss_mem_cid", "rss_cid", 0, {256, 320}, {32, 32} }, + { "rss_mem_key_msb", "rss_key", 1024, {128, 208}, {256, 256} }, + { "rss_mem_key_lsb", "rss_key", 2048, {128, 208}, {64, 64} }, + { "rss_mem_info", "rss_info", 3072, {128, 208}, {16, 16} }, + { "rss_mem_ind", "rss_ind", 4096, - {(128 * 128), (128 * 208)}, + {16384, 26624}, {16, 16} } }; @@ -1466,50 +1651,71 @@ static struct big_ram_defs s_big_ram_defs[] = { { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, {4800, 5632} }, + { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, {2880, 3680} }, + { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, {1152, 1152} } }; static struct reset_reg_defs s_reset_regs_defs[] = { + /* DBG_RESET_REG_MISCS_PL_UA */ { MISCS_REG_RESET_PL_UA, 0x0, - {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */ + {true, true} }, + + /* DBG_RESET_REG_MISCS_PL_HV */ { MISCS_REG_RESET_PL_HV, 0x0, - {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */ - { MISCS_REG_RESET_PL_HV_2, 0x0, - {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */ + {true, true} }, + + /* DBG_RESET_REG_MISCS_PL_HV_2 */ + { MISCS_REG_RESET_PL_HV_2_K2, 0x0, + {false, true} }, + + /* DBG_RESET_REG_MISC_PL_UA */ { MISC_REG_RESET_PL_UA, 0x0, - {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */ + {true, true} }, + + /* DBG_RESET_REG_MISC_PL_HV */ { MISC_REG_RESET_PL_HV, 0x0, - {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */ + {true, true} }, + + /* 
DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, - {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ + {true, true} }, + + /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, - {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ + {true, true} }, + + /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ { MISC_REG_RESET_PL_PDA_VAUX, 0x2, - {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ + {true, true} }, }; static struct phy_defs s_phy_defs[] = { - {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8}, - {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131}, - {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131}, - {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131}, + {"nw_phy", NWS_REG_NWS_CMU_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, + {"sgmii_phy", MS_REG_MS_CMU_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, }; /**************************** Private Functions ******************************/ @@ -1556,7 +1762,7 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, dev_data->chip_id = CHIP_K2; dev_data->mode_enable[MODE_K2] = 1; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { - dev_data->chip_id = CHIP_BB_B0; + dev_data->chip_id = CHIP_BB; dev_data->mode_enable[MODE_BB] = 1; } else { return DBG_STATUS_UNKNOWN_CHIP; @@ -1569,9 +1775,20 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, qed_dbg_grc_init_params(p_hwfn); dev_data->initialized = true; + return DBG_STATUS_OK; } +static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn, + enum block_id block_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + return (struct dbg_bus_block *)&dbg_bus_blocks[block_id * + MAX_CHIP_IDS + + dev_data->chip_id]; +} + /* Reads the FW info structure for the specified Storm from the chip, * and writes it to the specified fw_info pointer. */ @@ -1579,25 +1796,28 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 storm_id, struct fw_info *fw_info) { - /* Read first the address that points to fw_info location. - * The address is located in the last line of the Storm RAM. 
- */ - u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_INT_RAM + - DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - - sizeof(struct fw_info_location); + struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; - u32 *dest = (u32 *)&fw_info_location; - u32 i; + u32 addr, i, *dest; memset(&fw_info_location, 0, sizeof(fw_info_location)); memset(fw_info, 0, sizeof(*fw_info)); + + /* Read first the address that points to fw_info location. + * The address is located in the last line of the Storm RAM. + */ + addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - + sizeof(fw_info_location); + dest = (u32 *)&fw_info_location; + for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); + + /* Read FW version info from Storm RAM */ if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) { - /* Read FW version info from Storm RAM */ addr = fw_info_location.grc_addr; dest = (u32 *)fw_info; for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); @@ -1606,27 +1826,30 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn, } } -/* Dumps the specified string to the specified buffer. Returns the dumped size - * in bytes (actual length + 1 for the null character termination). +/* Dumps the specified string to the specified buffer. + * Returns the dumped size in bytes. */ static u32 qed_dump_str(char *dump_buf, bool dump, const char *str) { if (dump) strcpy(dump_buf, str); + return (u32)strlen(str) + 1; } -/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size - * in bytes. +/* Dumps zeros to align the specified buffer to dwords. + * Returns the dumped size in bytes. */ static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset) { - u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size; + u8 offset_in_dword, align_size; + offset_in_dword = (u8)(byte_offset & 0x3); align_size = offset_in_dword ? 
BYTES_IN_DWORD - offset_in_dword : 0; if (dump && align_size) memset(dump_buf, 0, align_size); + return align_size; } @@ -1653,6 +1876,7 @@ static u32 qed_dump_str_param(u32 *dump_buf, /* Align buffer to next dword */ offset += qed_dump_align(char_buf + offset, dump, offset); + return BYTES_TO_DWORDS(offset); } @@ -1681,6 +1905,7 @@ static u32 qed_dump_num_param(u32 *dump_buf, if (dump) *(dump_buf + offset) = param_val; offset++; + return offset; } @@ -1695,7 +1920,6 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; - int printed_chars; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { @@ -1705,37 +1929,32 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) { - /* Read FW version/image */ - if (!dev_data->block_in_reset - [s_storm_defs[storm_id].block_id]) { - /* read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, - p_ptt, storm_id, &fw_info); - - /* Create FW version/image strings */ - printed_chars = - snprintf(fw_ver_str, - sizeof(fw_ver_str), - "%d_%d_%d_%d", - fw_info.ver.num.major, - fw_info.ver.num.minor, - fw_info.ver.num.rev, - fw_info.ver.num.eng); - if (printed_chars < 0 || printed_chars >= - sizeof(fw_ver_str)) - DP_NOTICE(p_hwfn, - "Unexpected debug error: invalid FW version string\n"); - switch (fw_info.ver.image_id) { - case FW_IMG_MAIN: - strcpy(fw_img_str, "main"); - break; - default: - strcpy(fw_img_str, "unknown"); - break; - } + struct storm_defs *storm = &s_storm_defs[storm_id]; + + /* Read FW version/image */ + if (dev_data->block_in_reset[storm->block_id]) + continue; - found = true; + /* Read FW info for the current Storm */ + qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; } + + found = true; } } @@ -1747,6 +1966,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp); + return offset; } @@ -1759,17 +1979,18 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, { char mfw_ver_str[16] = EMPTY_FW_VERSION_STR; - if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { + if (dump && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { u32 global_section_offsize, global_section_addr, mfw_ver; u32 public_data_addr, global_section_offsize_addr; - int printed_chars; - /* Find MCP public data GRC address. - * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug. + /* Find MCP public data GRC address. Needs to be ORed with + * MCP_REG_SCRATCH due to a HW bug. 
*/ - public_data_addr = qed_rd(p_hwfn, p_ptt, + public_data_addr = qed_rd(p_hwfn, + p_ptt, MISC_REG_SHARED_MEM_ADDR) | - MCP_REG_SCRATCH; + MCP_REG_SCRATCH; /* Find MCP public global section offset */ global_section_offsize_addr = public_data_addr + @@ -1778,9 +1999,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, sizeof(offsize_t) * PUBLIC_GLOBAL; global_section_offsize = qed_rd(p_hwfn, p_ptt, global_section_offsize_addr); - global_section_addr = MCP_REG_SCRATCH + - (global_section_offsize & - OFFSIZE_OFFSET_MASK) * 4; + global_section_addr = + MCP_REG_SCRATCH + + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4; /* Read MFW version from MCP public global section */ mfw_ver = qed_rd(p_hwfn, p_ptt, @@ -1788,13 +2009,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, offsetof(struct public_global, mfw_ver)); /* Dump MFW version param */ - printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str), - "%d_%d_%d_%d", - (u8) (mfw_ver >> 24), - (u8) (mfw_ver >> 16), - (u8) (mfw_ver >> 8), - (u8) mfw_ver); - if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str)) + if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", + (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), + (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid MFW version string\n"); } @@ -1820,11 +2037,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, bool dump, u8 num_specific_global_params) { - u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 offset = 0; + u8 num_params; - /* Find platform string and dump global params section header */ + /* Dump global params section header */ + num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params; offset += qed_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params); @@ -1846,25 +2064,29 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id); + return offset; } -/* Writes the last section to the specified buffer at the given offset. - * Returns the dumped size in dwords. +/* Writes the "last" section (including CRC) to the specified buffer at the + * given offset. Returns the dumped size in dwords. */ -static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) +static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn, + u32 *dump_buf, u32 offset, bool dump) { - u32 start_offset = offset, crc = ~0; + u32 start_offset = offset; /* Dump CRC section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); - /* Calculate CRC32 and add it to the dword following the "last" section. 
- */ + /* Calculate CRC32 and add it to the dword after the "last" section */ if (dump) - *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf, + *(dump_buf + offset) = ~crc32(0xffffffff, + (u8 *)dump_buf, DWORDS_TO_BYTES(offset)); + offset++; + return offset - start_offset; } @@ -1883,11 +2105,12 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn, p_ptt, s_reset_regs_defs[i].addr); /* Check if blocks are in reset */ - for (i = 0; i < MAX_BLOCK_ID; i++) - dev_data->block_in_reset[i] = - s_block_defs[i]->has_reset_bit && - !(reg_val[s_block_defs[i]->reset_reg] & - BIT(s_block_defs[i]->reset_bit_offset)); + for (i = 0; i < MAX_BLOCK_ID; i++) { + struct block_defs *block = s_block_defs[i]; + + dev_data->block_in_reset[i] = block->has_reset_bit && + !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset)); + } } /* Enable / disable the Debug block */ @@ -1902,12 +2125,12 @@ static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val; + struct block_defs *dbg_block = s_block_defs[BLOCK_DBG]; - dbg_reset_reg_addr = - s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr; + dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr; old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr); - new_reset_reg_val = old_reset_reg_val & - ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset); + new_reset_reg_val = + old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset); qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val); qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val); @@ -1920,8 +2143,8 @@ static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode); } -/* Enable / disable Debug Bus clients according to the specified mask. - * (1 = enable, 0 = disable) +/* Enable / disable Debug Bus clients according to the specified mask + * (1 = enable, 0 = disable). 
*/ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 client_mask) @@ -1931,10 +2154,14 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) { - const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++]; bool arg1, arg2; + const u32 *ptr; + u8 tree_val; + + /* Get next element from modes tree buffer */ + ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; + tree_val = ((u8 *)ptr)[(*modes_buf_offset)++]; switch (tree_val) { case INIT_MODE_OP_NOT: @@ -1974,75 +2201,81 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn, static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, enum block_id block_id, u8 mem_group_id) { + struct block_defs *block = s_block_defs[block_id]; u8 i; /* Check Storm match */ - if (s_block_defs[block_id]->associated_to_storm && + if (block->associated_to_storm && !qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)s_block_defs[block_id]->storm_id)) + (enum dbg_storms)block->storm_id)) return false; - for (i = 0; i < NUM_BIG_RAM_TYPES; i++) - if (mem_group_id == s_big_ram_defs[i].mem_group_id || - mem_group_id == s_big_ram_defs[i].ram_mem_group_id) - return qed_grc_is_included(p_hwfn, - s_big_ram_defs[i].grc_param); - if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id == - MEM_GROUP_PXP_MEM) + for (i = 0; i < NUM_BIG_RAM_TYPES; i++) { + struct big_ram_defs *big_ram = &s_big_ram_defs[i]; + + if (mem_group_id == big_ram->mem_group_id || + mem_group_id == big_ram->ram_mem_group_id) + return qed_grc_is_included(p_hwfn, big_ram->grc_param); + } + + switch (mem_group_id) { + case MEM_GROUP_PXP_ILT: + case MEM_GROUP_PXP_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP); - if (mem_group_id == MEM_GROUP_RAM) + case MEM_GROUP_RAM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM); - if (mem_group_id == MEM_GROUP_PBUF) + case MEM_GROUP_PBUF: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF); - if (mem_group_id == MEM_GROUP_CAU_MEM || - mem_group_id == MEM_GROUP_CAU_SB || - mem_group_id == MEM_GROUP_CAU_PI) + case MEM_GROUP_CAU_MEM: + case MEM_GROUP_CAU_SB: + case MEM_GROUP_CAU_PI: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU); - if (mem_group_id == MEM_GROUP_QM_MEM) + case MEM_GROUP_QM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM); - if (mem_group_id == MEM_GROUP_CONN_CFC_MEM || - mem_group_id == MEM_GROUP_TASK_CFC_MEM) + case MEM_GROUP_CFC_MEM: + case MEM_GROUP_CONN_CFC_MEM: + case MEM_GROUP_TASK_CFC_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC); - if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id == - MEM_GROUP_IGU_MSIX) + case MEM_GROUP_IGU_MEM: + case MEM_GROUP_IGU_MSIX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); - if (mem_group_id == MEM_GROUP_MULD_MEM) + case MEM_GROUP_MULD_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD); - if (mem_group_id == MEM_GROUP_PRS_MEM) + case MEM_GROUP_PRS_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS); - if (mem_group_id == MEM_GROUP_DMAE_MEM) + case MEM_GROUP_DMAE_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE); - if (mem_group_id == MEM_GROUP_TM_MEM) + case MEM_GROUP_TM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM); - if (mem_group_id == MEM_GROUP_SDM_MEM) + case MEM_GROUP_SDM_MEM: return qed_grc_is_included(p_hwfn, 
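qed_is_mode_match() evaluates a prefix-encoded boolean tree: each byte consumed from the modes buffer is either a NOT/OR/AND operator or a leaf index into the set of active chip modes, and the caller's offset advances as nodes are read. A userspace model; the operator byte values are assumptions, and both operands of a binary node are always evaluated so the offset stays consistent:

#include <stdbool.h>
#include <stdint.h>

enum {					/* assumed operator encoding */
	MODE_OP_NOT = 0xff,
	MODE_OP_OR = 0xfe,
	MODE_OP_AND = 0xfd,
};

static bool eval_mode_tree(const uint8_t *tree, uint16_t *off,
			   const bool *active_modes)
{
	uint8_t val = tree[(*off)++];
	bool arg1, arg2;

	switch (val) {
	case MODE_OP_NOT:
		return !eval_mode_tree(tree, off, active_modes);
	case MODE_OP_OR:
	case MODE_OP_AND:
		/* Evaluate both so the offset keeps advancing */
		arg1 = eval_mode_tree(tree, off, active_modes);
		arg2 = eval_mode_tree(tree, off, active_modes);
		return val == MODE_OP_OR ? (arg1 || arg2) : (arg1 && arg2);
	default:
		return active_modes[val];	/* leaf: mode index */
	}
}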
DBG_GRC_PARAM_DUMP_SDM); - if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id == - MEM_GROUP_RDIF_CTX) + case MEM_GROUP_TDIF_CTX: + case MEM_GROUP_RDIF_CTX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF); - if (mem_group_id == MEM_GROUP_CM_MEM) + case MEM_GROUP_CM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM); - if (mem_group_id == MEM_GROUP_IOR) + case MEM_GROUP_IOR: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR); - - return true; + default: + return true; + } } /* Stalls all Storms */ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool stall) { - u8 reg_val = stall ? 1 : 0; + u32 reg_addr; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - if (qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)storm_id)) { - u32 reg_addr = - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_STALL_0; + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id)) + continue; - qed_wr(p_hwfn, p_ptt, reg_addr, reg_val); - } + reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + + SEM_FAST_REG_STALL_0_BB_K2; + qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); } msleep(STALL_DELAY_MS); @@ -2054,24 +2287,29 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 reg_val[MAX_DBG_RESET_REGS] = { 0 }; - u32 i; + u32 block_id, i; /* Fill reset regs values */ - for (i = 0; i < MAX_BLOCK_ID; i++) - if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset) - reg_val[s_block_defs[i]->reset_reg] |= - BIT(s_block_defs[i]->reset_bit_offset); + for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { + struct block_defs *block = s_block_defs[block_id]; + + if (block->has_reset_bit && block->unreset) + reg_val[block->reset_reg] |= + BIT(block->reset_bit_offset); + } /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { - if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { - reg_val[i] |= s_reset_regs_defs[i].unreset_val; - if (reg_val[i]) - qed_wr(p_hwfn, - p_ptt, - s_reset_regs_defs[i].addr + - RESET_REG_UNRESET_OFFSET, reg_val[i]); - } + if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) + continue; + + reg_val[i] |= s_reset_regs_defs[i].unreset_val; + + if (reg_val[i]) + qed_wr(p_hwfn, + p_ptt, + s_reset_regs_defs[i].addr + + RESET_REG_UNRESET_OFFSET, reg_val[i]); } } @@ -2095,6 +2333,7 @@ qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type, qed_get_block_attn_data(block_id, attn_type); *num_attn_regs = block_type_data->num_regs; + return &((const struct dbg_attn_reg *) s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data-> regs_offset]; @@ -2105,34 +2344,34 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + const struct dbg_attn_reg *attn_reg_arr; u8 reg_idx, num_attn_regs; u32 block_id; for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - const struct dbg_attn_reg *attn_reg_arr; - if (dev_data->block_in_reset[block_id]) continue; attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; + u16 modes_buf_offset; + bool eval_mode; /* Check mode */ - bool eval_mode = GET_FIELD(reg_data->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; - u16 modes_buf_offset = + eval_mode = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + 
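qed_grc_unreset_blocks() first folds every block's reset bit into one mask per reset register and only then touches the hardware, so each register is written once rather than once per block. The accumulation step as a standalone sketch with illustrative field names:

#include <stdint.h>

#define MAX_RESET_REGS 16			/* illustrative */

struct blk_def {
	uint8_t has_reset_bit;
	uint8_t unreset;
	uint8_t reset_reg;			/* index into reg_val[] */
	uint8_t reset_bit;
};

static void build_unreset_masks(const struct blk_def *blocks, int n,
				uint32_t reg_val[MAX_RESET_REGS])
{
	for (int i = 0; i < n; i++)
		if (blocks[i].has_reset_bit && blocks[i].unreset)
			reg_val[blocks[i].reset_reg] |=
				1u << blocks[i].reset_bit;
}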
modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); + /* If Mode match: clear parity status */ if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) - /* Mode match - read parity status read-clear - * register. - */ qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data-> sts_clr_address)); @@ -2142,11 +2381,11 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, /* Dumps GRC registers section header. Returns the dumped size in dwords. * The following parameters are dumped: - * - 'count' = num_dumped_entries - * - 'split' = split_type - * - 'id' = split_id (dumped only if split_id >= 0) - * - 'param_name' = param_val (user param, dumped only if param_name != NULL and - * param_val != NULL) + * - count: no. of dumped entries + * - split: split type + * - id: split ID (dumped only if split_id >= 0) + * - param_name: user parameter value (dumped only if param_name != NULL + * and param_val != NULL). */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, @@ -2170,84 +2409,100 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, if (param_name && param_val) offset += qed_dump_str_param(dump_buf + offset, dump, param_name, param_val); + return offset; } /* Dumps the GRC registers in the specified address range. * Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. */ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len) + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 addr, u32 len, bool wide_bus) { u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i; - if (dump) - for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); - else - offset += len; + if (!dump) + return len; + + for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) + *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); + return offset; } -/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */ -static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr, - u32 len) +/* Dumps GRC registers sequence header. Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ +static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, + bool dump, u32 addr, u32 len) { if (dump) *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT); + return 1; } -/* Dumps GRC registers sequence. Returns the dumped size in dwords. */ +/* Dumps GRC registers sequence. Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len) + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 addr, u32 len, bool wide_bus) { u32 offset = 0; offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, - dump_buf + offset, dump, addr, len); + dump_buf + offset, + dump, addr, len, wide_bus); + return offset; } /* Dumps GRC registers sequence with skip cycle. * Returns the dumped size in dwords. + * - addr: start GRC address in dwords + * - total_len: total no. of dwords to dump + * - read_len: no. consecutive dwords to read + * - skip_len: no. 
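qed_grc_dump_reg_entry_hdr() packs a register sequence descriptor into a single dword: the dword address in the low bits and the length above REG_DUMP_LEN_SHIFT. A pack/unpack pair a dump parser would need; the shift value here is an assumption for illustration:

#include <stdint.h>

#define REG_DUMP_LEN_SHIFT 24			/* assumed */

static uint32_t pack_reg_seq_hdr(uint32_t addr_dwords, uint32_t len_dwords)
{
	return addr_dwords | (len_dwords << REG_DUMP_LEN_SHIFT);
}

static void unpack_reg_seq_hdr(uint32_t hdr, uint32_t *addr, uint32_t *len)
{
	*addr = hdr & ((1u << REG_DUMP_LEN_SHIFT) - 1);
	*len = hdr >> REG_DUMP_LEN_SHIFT;
}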
of dwords to skip (and fill with zeros) */ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 total_len, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 addr, + u32 total_len, u32 read_len, u32 skip_len) { u32 offset = 0, reg_offset = 0; offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len); - if (dump) { - while (reg_offset < total_len) { - u32 curr_len = min_t(u32, - read_len, - total_len - reg_offset); - offset += qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - dump, addr, curr_len); + + if (!dump) + return offset + total_len; + + while (reg_offset < total_len) { + u32 curr_len = min_t(u32, read_len, total_len - reg_offset); + + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, addr, curr_len, false); + reg_offset += curr_len; + addr += curr_len; + + if (reg_offset < total_len) { + curr_len = min_t(u32, skip_len, total_len - skip_len); + memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len)); + offset += curr_len; reg_offset += curr_len; addr += curr_len; - if (reg_offset < total_len) { - curr_len = min_t(u32, - skip_len, - total_len - skip_len); - memset(dump_buf + offset, 0, - DWORDS_TO_BYTES(curr_len)); - offset += curr_len; - reg_offset += curr_len; - addr += curr_len; - } } - } else { - offset += total_len; } return offset; @@ -2266,43 +2521,48 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, bool mode_match = true; *num_dumped_reg_entries = 0; + while (input_offset < input_regs_arr.size_in_dwords) { const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *) &input_regs_arr.ptr[input_offset++]; - bool eval_mode = GET_FIELD(cond_hdr->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; + u16 modes_buf_offset; + bool eval_mode; /* Check mode/block */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = + modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } - if (mode_match && block_enable[cond_hdr->block_id]) { - for (i = 0; i < cond_hdr->data_size; - i++, input_offset++) { - const struct dbg_dump_reg *reg = - (const struct dbg_dump_reg *) - &input_regs_arr.ptr[input_offset]; - u32 addr, len; - - addr = GET_FIELD(reg->data, - DBG_DUMP_REG_ADDRESS); - len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); - offset += - qed_grc_dump_reg_entry(p_hwfn, p_ptt, - dump_buf + offset, - dump, - addr, - len); - (*num_dumped_reg_entries)++; - } - } else { + if (!mode_match || !block_enable[cond_hdr->block_id]) { input_offset += cond_hdr->data_size; + continue; + } + + for (i = 0; i < cond_hdr->data_size; i++, input_offset++) { + const struct dbg_dump_reg *reg = + (const struct dbg_dump_reg *) + &input_regs_arr.ptr[input_offset]; + u32 addr, len; + bool wide_bus; + + addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS); + len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); + wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + wide_bus); + (*num_dumped_reg_entries)++; } } @@ -2350,8 +2610,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, return num_dumped_reg_entries > 0 ? offset : 0; } -/* Dumps registers according to the input registers array. - * Returns the dumped size in dwords. +/* Dumps registers according to the input registers array. 
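The restructured skip-cycle loop alternates between read_len dwords of live data and skip_len dwords of zero filler until total_len dwords are produced. A userspace model with memcpy() standing in for the GRC reads; note it clamps the final skip run to the dwords remaining, whereas the driver clamps against total_len - skip_len:

#include <stdint.h>
#include <string.h>

static uint32_t dump_with_skip(uint32_t *out, const uint32_t *src,
			       uint32_t total_len, uint32_t read_len,
			       uint32_t skip_len)
{
	uint32_t done = 0, off = 0;

	while (done < total_len) {
		uint32_t n = read_len < total_len - done ?
			     read_len : total_len - done;

		memcpy(out + off, src + done, n * sizeof(*out));
		off += n;
		done += n;

		if (done < total_len) {
			n = skip_len < total_len - done ?
			    skip_len : total_len - done;
			memset(out + off, 0, n * sizeof(*out));
			off += n;
			done += n;
		}
	}

	return off;				/* dwords written */
}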
Returns the dumped + * size in dwords. */ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2361,29 +2621,37 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct chip_platform_defs *p_platform_defs; + struct chip_platform_defs *chip_platform; u32 offset = 0, input_offset = 0; - struct chip_defs *p_chip_defs; + struct chip_defs *chip; u8 port_id, pf_id, vf_id; u16 fid; - p_chip_defs = &s_chip_defs[dev_data->chip_id]; - p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id]; + chip = &s_chip_defs[dev_data->chip_id]; + chip_platform = &chip->per_platform[dev_data->platform_id]; if (dump) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); + while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { - const struct dbg_dump_split_hdr *split_hdr = + const struct dbg_dump_split_hdr *split_hdr; + struct dbg_array curr_input_regs_arr; + u32 split_data_size; + u8 split_type_id; + + split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; - u8 split_type_id = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); - u32 split_data_size = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_DATA_SIZE); - struct dbg_array curr_input_regs_arr = { - &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset], - split_data_size}; + split_type_id = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_regs_arr.ptr = + &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset]; + curr_input_regs_arr.size_in_dwords = split_data_size; switch (split_type_id) { case SPLIT_TYPE_NONE: @@ -2398,8 +2666,9 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_name, param_val); break; + case SPLIT_TYPE_PORT: - for (port_id = 0; port_id < p_platform_defs->num_ports; + for (port_id = 0; port_id < chip_platform->num_ports; port_id++) { if (dump) qed_port_pretend(p_hwfn, p_ptt, @@ -2414,9 +2683,10 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_val); } break; + case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; pf_id < p_platform_defs->num_pfs; + for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) { u8 pfid_shift = PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; @@ -2427,17 +2697,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, } offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, + qed_grc_dump_split_data(p_hwfn, + p_ptt, curr_input_regs_arr, dump_buf + offset, - dump, block_enable, - "pf", pf_id, + dump, + block_enable, + "pf", + pf_id, param_name, param_val); } break; + case SPLIT_TYPE_VF: - for (vf_id = 0; vf_id < p_platform_defs->num_vfs; + for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) { u8 vfvalid_shift = PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; @@ -2460,6 +2734,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_val); } break; + default: break; } @@ -2490,35 +2765,37 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { - if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { - u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr); + if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) + continue; - offset += qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - 1); - num_regs++; - } + offset += 
qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + BYTES_TO_DWORDS + (s_reset_regs_defs[i].addr), 1, + false); + num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, NULL, NULL); + return offset; } -/* Dump registers that are modified during GRC Dump and therefore must be dumped - * first. Returns the dumped size in dwords. +/* Dump registers that are modified during GRC Dump and therefore must be + * dumped first. Returns the dumped size in dwords. */ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 offset = 0, num_reg_entries = 0, block_id; + u32 block_id, offset = 0, num_reg_entries = 0; + const struct dbg_attn_reg *attn_reg_arr; u8 storm_id, reg_idx, num_attn_regs; /* Calculate header size */ @@ -2527,14 +2804,13 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Write parity registers */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - const struct dbg_attn_reg *attn_reg_arr; - if (dev_data->block_in_reset[block_id] && dump) continue; attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; @@ -2548,37 +2824,36 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); - if (!eval_mode || - qed_is_mode_match(p_hwfn, &modes_buf_offset)) { - /* Mode match - read and dump registers */ - addr = reg_data->mask_address; - offset += - qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - 1); - addr = GET_FIELD(reg_data->data, - DBG_ATTN_REG_STS_ADDRESS); - offset += - qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - 1); - num_reg_entries += 2; - } + if (eval_mode && + !qed_is_mode_match(p_hwfn, &modes_buf_offset)) + continue; + + /* Mode match: read & dump registers */ + addr = reg_data->mask_address; + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, false); + addr = GET_FIELD(reg_data->data, + DBG_ATTN_REG_STS_ADDRESS); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, false); + num_reg_entries += 2; } } - /* Write storm stall status registers */ + /* Write Storm stall status registers */ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 addr; - if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] && - dump) + if (dev_data->block_in_reset[storm->block_id] && dump) continue; addr = @@ -2589,7 +2864,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1); + 1, + false); num_reg_entries++; } @@ -2598,6 +2874,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, qed_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, NULL, NULL); + return offset; } @@ -2637,17 +2914,17 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, return offset; } -/* Dumps a GRC memory header (section and params). - * The following parameters are dumped: - * name - name is dumped only if it's not NULL. - * addr - addr is dumped only if name is NULL. - * len - len is always dumped. - * width - bit_width is dumped if it's not zero. 
- * packed - packed=1 is dumped if it's not false. - * mem_group - mem_group is always dumped. - * is_storm - true only if the memory is related to a Storm. - * storm_letter - storm letter (valid only if is_storm is true). - * Returns the dumped size in dwords. +/* Dumps a GRC memory header (section and params). Returns the dumped size in + * dwords. The following parameters are dumped: + * - name: dumped only if it's not NULL. + * - addr: in dwords, dumped only if name is NULL. + * - len: in dwords, always dumped. + * - width: dumped if it's not zero. + * - packed: dumped only if it's not false. + * - mem_group: always dumped. + * - is_storm: true only if the memory is related to a Storm. + * - storm_letter: valid only if is_storm is true. + * */ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -2667,6 +2944,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, if (!len) DP_NOTICE(p_hwfn, "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); + if (bit_width) num_params++; if (packed) @@ -2675,6 +2953,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, /* Dump section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params); + if (name) { /* Dump name */ if (is_storm) { @@ -2694,14 +2973,15 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, len, buf); } else { /* Dump address */ + u32 addr_in_bytes = DWORDS_TO_BYTES(addr); + offset += qed_dump_num_param(dump_buf + offset, - dump, "addr", - DWORDS_TO_BYTES(addr)); + dump, "addr", addr_in_bytes); if (dump && len > 64) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", - len, (u32)DWORDS_TO_BYTES(addr)); + len, addr_in_bytes); } /* Dump len */ @@ -2727,11 +3007,13 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, } offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf); + return offset; } /* Dumps a single GRC memory. If name is NULL, the memory is stored by address. * Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. 
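These helpers mix units: section parameters carry addresses and lengths in dwords while the register interface works in bytes, hence the recurring DWORDS_TO_BYTES()/BYTES_TO_DWORDS() conversions, which amount to:

#include <stdint.h>

#define BYTES_IN_DWORD 4

static inline uint32_t dwords_to_bytes(uint32_t dwords)
{
	return dwords * BYTES_IN_DWORD;
}

static inline uint32_t bytes_to_dwords(uint32_t bytes)
{
	return bytes / BYTES_IN_DWORD;
}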
*/ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2740,6 +3022,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, const char *name, u32 addr, u32 len, + bool wide_bus, u32 bit_width, bool packed, const char *mem_group, @@ -2758,7 +3041,9 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, mem_group, is_storm, storm_letter); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, - dump_buf + offset, dump, addr, len); + dump_buf + offset, + dump, addr, len, wide_bus); + return offset; } @@ -2773,20 +3058,21 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, while (input_offset < input_mems_arr.size_in_dwords) { const struct dbg_dump_cond_hdr *cond_hdr; + u16 modes_buf_offset; u32 num_entries; bool eval_mode; cond_hdr = (const struct dbg_dump_cond_hdr *) &input_mems_arr.ptr[input_offset++]; - eval_mode = GET_FIELD(cond_hdr->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; + num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; /* Check required mode */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = + modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); - mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } @@ -2796,81 +3082,87 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, continue; } - num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) { const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *) &input_mems_arr.ptr[input_offset]; - u8 mem_group_id; + u8 mem_group_id = GET_FIELD(mem->dword0, + DBG_DUMP_MEM_MEM_GROUP_ID); + bool is_storm = false, mem_wide_bus; + enum dbg_grc_params grc_param; + char storm_letter = 'a'; + enum block_id block_id; + u32 mem_addr, mem_len; - mem_group_id = GET_FIELD(mem->dword0, - DBG_DUMP_MEM_MEM_GROUP_ID); if (mem_group_id >= MEM_GROUPS_NUM) { DP_NOTICE(p_hwfn, "Invalid mem_group_id\n"); return 0; } - if (qed_grc_is_mem_included(p_hwfn, - (enum block_id)cond_hdr->block_id, - mem_group_id)) { - u32 mem_addr = GET_FIELD(mem->dword0, - DBG_DUMP_MEM_ADDRESS); - u32 mem_len = GET_FIELD(mem->dword1, - DBG_DUMP_MEM_LENGTH); - enum dbg_grc_params grc_param; - char storm_letter = 'a'; - bool is_storm = false; - - /* Update memory length for CCFC/TCFC memories - * according to number of LCIDs/LTIDs. - */ - if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) { - if (mem_len % MAX_LCIDS != 0) { - DP_NOTICE(p_hwfn, - "Invalid CCFC connection memory size\n"); - return 0; - } - - grc_param = DBG_GRC_PARAM_NUM_LCIDS; - mem_len = qed_grc_get_param(p_hwfn, - grc_param) * - (mem_len / MAX_LCIDS); - } else if (mem_group_id == - MEM_GROUP_TASK_CFC_MEM) { - if (mem_len % MAX_LTIDS != 0) { - DP_NOTICE(p_hwfn, - "Invalid TCFC task memory size\n"); - return 0; - } - - grc_param = DBG_GRC_PARAM_NUM_LTIDS; - mem_len = qed_grc_get_param(p_hwfn, - grc_param) * - (mem_len / MAX_LTIDS); + block_id = (enum block_id)cond_hdr->block_id; + if (!qed_grc_is_mem_included(p_hwfn, + block_id, + mem_group_id)) + continue; + + mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS); + mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH); + mem_wide_bus = GET_FIELD(mem->dword1, + DBG_DUMP_MEM_WIDE_BUS); + + /* Update memory length for CCFC/TCFC memories + * according to number of LCIDs/LTIDs. 
+ */ + if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) { + if (mem_len % MAX_LCIDS) { + DP_NOTICE(p_hwfn, + "Invalid CCFC connection memory size\n"); + return 0; } - /* If memory is associated with Storm, update - * Storm details. - */ - if (s_block_defs[cond_hdr->block_id]-> - associated_to_storm) { - is_storm = true; - storm_letter = - s_storm_defs[s_block_defs[ - cond_hdr->block_id]-> - storm_id].letter; + grc_param = DBG_GRC_PARAM_NUM_LCIDS; + mem_len = qed_grc_get_param(p_hwfn, grc_param) * + (mem_len / MAX_LCIDS); + } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) { + if (mem_len % MAX_LTIDS) { + DP_NOTICE(p_hwfn, + "Invalid TCFC task memory size\n"); + return 0; } - /* Dump memory */ - offset += qed_grc_dump_mem(p_hwfn, p_ptt, - dump_buf + offset, dump, NULL, - mem_addr, mem_len, 0, + grc_param = DBG_GRC_PARAM_NUM_LTIDS; + mem_len = qed_grc_get_param(p_hwfn, grc_param) * + (mem_len / MAX_LTIDS); + } + + /* If memory is associated with Storm, update Storm + * details. + */ + if (s_block_defs + [cond_hdr->block_id]->associated_to_storm) { + is_storm = true; + storm_letter = + s_storm_defs[s_block_defs + [cond_hdr->block_id]-> + storm_id].letter; + } + + /* Dump memory */ + offset += qed_grc_dump_mem(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + NULL, + mem_addr, + mem_len, + mem_wide_bus, + 0, false, s_mem_group_names[mem_group_id], - is_storm, storm_letter); - } - } + is_storm, + storm_letter); + } } return offset; @@ -2887,16 +3179,22 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { - const struct dbg_dump_split_hdr *split_hdr = - (const struct dbg_dump_split_hdr *) + const struct dbg_dump_split_hdr *split_hdr; + struct dbg_array curr_input_mems_arr; + u32 split_data_size; + u8 split_type_id; + + split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - u8 split_type_id = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); - u32 split_data_size = GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_DATA_SIZE); - struct dbg_array curr_input_mems_arr = { - &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset], - split_data_size}; + split_type_id = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_mems_arr.ptr = + &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; + curr_input_mems_arr.size_in_dwords = split_data_size; switch (split_type_id) { case SPLIT_TYPE_NONE: @@ -2906,6 +3204,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, dump_buf + offset, dump); break; + default: DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); @@ -2920,6 +3219,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, /* Dumps GRC context data for the specified Storm. * Returns the dumped size in dwords. + * The lid_size argument is specified in quad-regs. 
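The CCFC/TCFC branches rescale the memory length from the binary description: mem_len covers MAX_LCIDS (or MAX_LTIDS) ids, so the per-id size is mem_len / MAX_LCIDS, multiplied back by the id count actually configured. As a sketch, with an assumed MAX_LCIDS value:

#include <stdint.h>

#define MAX_LCIDS 320				/* assumed for illustration */

/* Returns the dump length for num_lcids connection ids, or 0 if the
 * total length does not divide evenly across MAX_LCIDS. */
static uint32_t ccfc_dump_len(uint32_t mem_len, uint32_t num_lcids)
{
	if (mem_len % MAX_LCIDS)
		return 0;

	return num_lcids * (mem_len / MAX_LCIDS);
}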
*/ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2931,13 +3231,15 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, u32 rd_reg_addr, u8 storm_id) { - u32 i, lid, total_size; - u32 offset = 0; + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 i, lid, total_size, offset = 0; if (!lid_size) return 0; + lid_size *= BYTES_IN_DWORD; total_size = num_lids * lid_size; + offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, @@ -2945,25 +3247,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, 0, total_size, lid_size * 32, - false, - name, - true, s_storm_defs[storm_id].letter); + false, name, true, storm->letter); + + if (!dump) + return offset + total_size; /* Dump context data */ - if (dump) { - for (lid = 0; lid < num_lids; lid++) { - for (i = 0; i < lid_size; i++, offset++) { - qed_wr(p_hwfn, - p_ptt, - s_storm_defs[storm_id].cm_ctx_wr_addr, - BIT(9) | lid); - *(dump_buf + offset) = qed_rd(p_hwfn, - p_ptt, - rd_reg_addr); - } + for (lid = 0; lid < num_lids; lid++) { + for (i = 0; i < lid_size; i++, offset++) { + qed_wr(p_hwfn, + p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid); + *(dump_buf + offset) = qed_rd(p_hwfn, + p_ptt, rd_reg_addr); } - } else { - offset += total_size; } return offset; @@ -2973,15 +3269,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { + enum dbg_grc_params grc_param; u32 offset = 0; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + if (!qed_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id)) continue; /* Dump Conn AG context size */ + grc_param = DBG_GRC_PARAM_NUM_LCIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -2989,14 +3289,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "CONN_AG_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LCIDS), - s_storm_defs[storm_id]. - cm_conn_ag_ctx_lid_size, - s_storm_defs[storm_id]. - cm_conn_ag_ctx_rd_addr, + grc_param), + storm->cm_conn_ag_ctx_lid_size, + storm->cm_conn_ag_ctx_rd_addr, storm_id); /* Dump Conn ST context size */ + grc_param = DBG_GRC_PARAM_NUM_LCIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -3004,14 +3303,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "CONN_ST_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LCIDS), - s_storm_defs[storm_id]. - cm_conn_st_ctx_lid_size, - s_storm_defs[storm_id]. - cm_conn_st_ctx_rd_addr, + grc_param), + storm->cm_conn_st_ctx_lid_size, + storm->cm_conn_st_ctx_rd_addr, storm_id); /* Dump Task AG context size */ + grc_param = DBG_GRC_PARAM_NUM_LTIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -3019,14 +3317,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "TASK_AG_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LTIDS), - s_storm_defs[storm_id]. - cm_task_ag_ctx_lid_size, - s_storm_defs[storm_id]. - cm_task_ag_ctx_rd_addr, + grc_param), + storm->cm_task_ag_ctx_lid_size, + storm->cm_task_ag_ctx_rd_addr, storm_id); /* Dump Task ST context size */ + grc_param = DBG_GRC_PARAM_NUM_LTIDS; offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, @@ -3034,11 +3331,9 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, dump, "TASK_ST_CTX", qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LTIDS), - s_storm_defs[storm_id]. - cm_task_st_ctx_lid_size, - s_storm_defs[storm_id]. 
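The context dump is an indirect read: for each lid, dword i is selected by writing (i << 9) | lid to the Storm's CM context write-address register and then fetched from the read register. The access pattern on its own, with a toy register file in place of qed_wr()/qed_rd():

#include <stdint.h>

static uint32_t toy_regs[65536];			/* toy register file */
static void reg_wr(uint32_t a, uint32_t v) { toy_regs[a % 65536] = v; }
static uint32_t reg_rd(uint32_t a) { return toy_regs[a % 65536]; }

/* Select each context dword via the write-address register, then read
 * it back; returns the number of dwords dumped. */
static uint32_t dump_ctx_data(uint32_t *buf, uint32_t wr_addr,
			      uint32_t rd_addr, uint32_t num_lids,
			      uint32_t lid_size_dwords)
{
	uint32_t off = 0;

	for (uint32_t lid = 0; lid < num_lids; lid++)
		for (uint32_t i = 0; i < lid_size_dwords; i++, off++) {
			reg_wr(wr_addr, (i << 9) | lid);
			buf[off] = reg_rd(rd_addr);
		}

	return off;
}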
- cm_task_st_ctx_rd_addr, + grc_param), + storm->cm_task_st_ctx_lid_size, + storm->cm_task_st_ctx_rd_addr, storm_id); } @@ -3050,8 +3345,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { char buf[10] = "IOR_SET_?"; + u32 addr, offset = 0; u8 storm_id, set_id; - u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { struct storm_defs *storm = &s_storm_defs[storm_id]; @@ -3061,11 +3356,9 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, continue; for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) { - u32 dwords, addr; - - dwords = storm->sem_fast_mem_addr + - SEM_FAST_REG_STORM_REG_FILE; - addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id); + addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + + SEM_FAST_REG_STORM_REG_FILE) + + IOR_SET_OFFSET(set_id); buf[strlen(buf) - 1] = '0' + set_id; offset += qed_grc_dump_mem(p_hwfn, p_ptt, @@ -3074,6 +3367,7 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, buf, addr, IORS_PER_SET, + false, 32, false, "ior", @@ -3091,10 +3385,10 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u8 storm_id) { u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS; + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 }; u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 }; - u32 offset = 0; - u32 row, i; + u32 row, i, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3103,38 +3397,34 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, 0, total_size, 256, - false, - "vfc_cam", - true, s_storm_defs[storm_id].letter); - if (dump) { - /* Prepare CAM address */ - SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); - for (row = 0; row < VFC_CAM_NUM_ROWS; - row++, offset += VFC_CAM_RESP_DWORDS) { - /* Write VFC CAM command */ - SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); - ARR_REG_WR(p_hwfn, - p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_WR, - cam_cmd, VFC_CAM_CMD_DWORDS); + false, "vfc_cam", true, storm->letter); - /* Write VFC CAM address */ - ARR_REG_WR(p_hwfn, - p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_ADDR, - cam_addr, VFC_CAM_ADDR_DWORDS); + if (!dump) + return offset + total_size; - /* Read VFC CAM read response */ - ARR_REG_RD(p_hwfn, - p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_RD, - dump_buf + offset, VFC_CAM_RESP_DWORDS); - } - } else { - offset += total_size; + /* Prepare CAM address */ + SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); + + for (row = 0; row < VFC_CAM_NUM_ROWS; + row++, offset += VFC_CAM_RESP_DWORDS) { + /* Write VFC CAM command */ + SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); + ARR_REG_WR(p_hwfn, + p_ptt, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, + cam_cmd, VFC_CAM_CMD_DWORDS); + + /* Write VFC CAM address */ + ARR_REG_WR(p_hwfn, + p_ptt, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, + cam_addr, VFC_CAM_ADDR_DWORDS); + + /* Read VFC CAM read response */ + ARR_REG_RD(p_hwfn, + p_ptt, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, + dump_buf + offset, VFC_CAM_RESP_DWORDS); } return offset; @@ -3148,10 +3438,10 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, u8 storm_id, struct vfc_ram_defs *ram_defs) { u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS; + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 }; u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 }; - 
u32 offset = 0; - u32 row, i; + u32 row, i, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3162,7 +3452,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, 256, false, ram_defs->type_name, - true, s_storm_defs[storm_id].letter); + true, storm->letter); /* Prepare RAM address */ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); @@ -3176,23 +3466,20 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, /* Write VFC RAM command */ ARR_REG_WR(p_hwfn, p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_WR, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS); /* Write VFC RAM address */ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row); ARR_REG_WR(p_hwfn, p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_ADDR, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS); /* Read VFC RAM read response */ ARR_REG_RD(p_hwfn, p_ptt, - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_VFC_DATA_RD, + storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS); } @@ -3208,28 +3495,27 @@ static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn, u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - if (qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)storm_id) && - s_storm_defs[storm_id].has_vfc && - (storm_id != DBG_PSTORM_ID || - dev_data->platform_id == PLATFORM_ASIC)) { - /* Read CAM */ - offset += qed_grc_dump_vfc_cam(p_hwfn, + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id) || + !s_storm_defs[storm_id].has_vfc || + (storm_id == DBG_PSTORM_ID && dev_data->platform_id != + PLATFORM_ASIC)) + continue; + + /* Read CAM */ + offset += qed_grc_dump_vfc_cam(p_hwfn, + p_ptt, + dump_buf + offset, + dump, storm_id); + + /* Read RAM */ + for (i = 0; i < NUM_VFC_RAM_TYPES; i++) + offset += qed_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, - dump, storm_id); - - /* Read RAM */ - for (i = 0; i < NUM_VFC_RAM_TYPES; i++) - offset += qed_grc_dump_vfc_ram(p_hwfn, - p_ptt, - dump_buf + - offset, - dump, - storm_id, - &s_vfc_ram_defs - [i]); - } + dump, + storm_id, + &s_vfc_ram_defs[i]); } return offset; @@ -3244,14 +3530,17 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, u8 rss_mem_id; for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { - struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id]; - u32 num_entries = rss_defs->num_entries[dev_data->chip_id]; - u32 entry_width = rss_defs->entry_width[dev_data->chip_id]; - u32 total_dwords = (num_entries * entry_width) / 32; - u32 size = RSS_REG_RSS_RAM_DATA_SIZE; - bool packed = (entry_width == 16); - u32 rss_addr = rss_defs->addr; - u32 i, addr; + u32 rss_addr, num_entries, entry_width, total_dwords, i; + struct rss_mem_defs *rss_defs; + u32 addr, size; + bool packed; + + rss_defs = &s_rss_mem_defs[rss_mem_id]; + rss_addr = rss_defs->addr; + num_entries = rss_defs->num_entries[dev_data->chip_id]; + entry_width = rss_defs->entry_width[dev_data->chip_id]; + total_dwords = (num_entries * entry_width) / 32; + packed = (entry_width == 16); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3263,23 +3552,23 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, packed, rss_defs->type_name, false, 0); + /* Dump RSS data */ if (!dump) { offset += total_dwords; continue; } - /* Dump RSS data */ - for (i = 0; i < total_dwords; - i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) { - addr = 
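The RSS RAM is read through a window: rss_addr is written to RSS_REG_RSS_RAM_ADDR, and RSS_REG_RSS_RAM_DATA_SIZE dwords are then read out of the data window, advancing the window address once per chunk. A model of the windowed read; the window size and the consecutive data-register layout are assumptions:

#include <stdint.h>

#define RAM_WINDOW_DWORDS 8			/* assumed window size */

static uint32_t toy_regs[65536];		/* toy register file */
static void reg_wr(uint32_t a, uint32_t v) { toy_regs[a % 65536] = v; }
static uint32_t reg_rd(uint32_t a) { return toy_regs[a % 65536]; }

static uint32_t dump_windowed_ram(uint32_t *buf, uint32_t addr_reg,
				  uint32_t data_reg, uint32_t ram_addr,
				  uint32_t total_dwords)
{
	uint32_t off = 0;

	for (uint32_t i = 0; i < total_dwords;
	     i += RAM_WINDOW_DWORDS, ram_addr++) {
		reg_wr(addr_reg, ram_addr);	/* move the window */
		for (uint32_t j = 0; j < RAM_WINDOW_DWORDS; j++)
			buf[off++] = reg_rd(data_reg + j * 4);
	}

	return off;
}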
BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); + addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); + size = RSS_REG_RSS_RAM_DATA_SIZE; + for (i = 0; i < total_dwords; i += size, rss_addr++) { qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); - offset += qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + - offset, - dump, - addr, - size); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + size, + false); } } @@ -3316,10 +3605,11 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0); + /* Read and dump Big RAM data */ if (!dump) return offset + ram_size; - /* Read and dump Big RAM data */ + /* Dump Big RAM */ for (i = 0; i < total_blocks / 2; i++) { u32 addr, len; @@ -3331,7 +3621,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - len); + len, + false); } return offset; @@ -3359,7 +3650,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, - 0, false, "MCP", false, 0); + false, 0, false, "MCP", false, 0); /* Dump MCP cpu_reg_file */ offset += qed_grc_dump_mem(p_hwfn, @@ -3369,7 +3660,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, - 0, false, "MCP", false, 0); + false, 0, false, "MCP", false, 0); /* Dump MCP registers */ block_enable[BLOCK_MCP] = true; @@ -3387,11 +3678,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1); + 1, + false); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); + return offset; } @@ -3404,14 +3697,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, u8 phy_id; for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) { - struct phy_defs *phy_defs = &s_phy_defs[phy_id]; - int printed_chars; - - printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s", - phy_defs->phy_name); - if (printed_chars < 0 || printed_chars >= sizeof(mem_name)) + u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr; + struct phy_defs *phy_defs; + u8 *bytes_buf; + + phy_defs = &s_phy_defs[phy_id]; + addr_lo_addr = phy_defs->base_addr + + phy_defs->tbus_addr_lo_addr; + addr_hi_addr = phy_defs->base_addr + + phy_defs->tbus_addr_hi_addr; + data_lo_addr = phy_defs->base_addr + + phy_defs->tbus_data_lo_addr; + data_hi_addr = phy_defs->base_addr + + phy_defs->tbus_data_hi_addr; + bytes_buf = (u8 *)(dump_buf + offset); + + if (snprintf(mem_name, sizeof(mem_name), "tbus_%s", + phy_defs->phy_name) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid PHY memory name\n"); + offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, @@ -3419,34 +3724,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0); - if (dump) { - u32 addr_lo_addr = phy_defs->base_addr + - phy_defs->tbus_addr_lo_addr; - u32 addr_hi_addr = phy_defs->base_addr + - phy_defs->tbus_addr_hi_addr; - u32 data_lo_addr = phy_defs->base_addr + - phy_defs->tbus_data_lo_addr; - u32 data_hi_addr = phy_defs->base_addr + - phy_defs->tbus_data_hi_addr; - u8 *bytes_buf = (u8 *)(dump_buf + offset); - - for (tbus_hi_offset = 0; - tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); - tbus_hi_offset++) { + + if (!dump) { + offset += PHY_DUMP_SIZE_DWORDS; + continue; + } + + for (tbus_hi_offset = 0; + tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); + tbus_hi_offset++) { + 
qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset); + for (tbus_lo_offset = 0; tbus_lo_offset < 256; + tbus_lo_offset++) { qed_wr(p_hwfn, - p_ptt, addr_hi_addr, tbus_hi_offset); - for (tbus_lo_offset = 0; tbus_lo_offset < 256; - tbus_lo_offset++) { - qed_wr(p_hwfn, - p_ptt, - addr_lo_addr, tbus_lo_offset); - *(bytes_buf++) = - (u8)qed_rd(p_hwfn, p_ptt, - data_lo_addr); - *(bytes_buf++) = - (u8)qed_rd(p_hwfn, p_ptt, - data_hi_addr); - } + p_ptt, addr_lo_addr, tbus_lo_offset); + *(bytes_buf++) = (u8)qed_rd(p_hwfn, + p_ptt, + data_lo_addr); + *(bytes_buf++) = (u8)qed_rd(p_hwfn, + p_ptt, + data_hi_addr); } } @@ -3460,16 +3757,17 @@ static void qed_config_dbg_line(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum block_id block_id, u8 line_id, - u8 cycle_en, - u8 right_shift, u8 force_valid, u8 force_frame) + u8 enable_mask, + u8 right_shift, + u8 force_valid_mask, u8 force_frame_mask) { - struct block_defs *p_block_defs = s_block_defs[block_id]; + struct block_defs *block = s_block_defs[block_id]; - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid); - qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame); + qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id); + qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask); + qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift); + qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask); + qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask); } /* Dumps Static Debug data. Returns the dumped size in dwords. */ @@ -3477,10 +3775,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 offset = 0, block_id, line_id; - struct block_defs *p_block_defs; + u32 block_id, line_id, offset = 0; + + /* Skip static debug if a debug bus recording is in progress */ + if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) + return 0; if (dump) { DP_VERBOSE(p_hwfn, @@ -3488,11 +3788,11 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, /* Disable all blocks debug output */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - p_block_defs = s_block_defs[block_id]; + struct block_defs *block = s_block_defs[block_id]; - if (p_block_defs->has_dbg_bus[dev_data->chip_id]) - qed_wr(p_hwfn, p_ptt, - p_block_defs->dbg_cycle_enable_addr, 0); + if (block->has_dbg_bus[dev_data->chip_id]) + qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, + 0); } qed_bus_reset_dbg_block(p_hwfn, p_ptt); @@ -3506,59 +3806,71 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, /* Dump all static debug lines for each relevant block */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - p_block_defs = s_block_defs[block_id]; + struct block_defs *block = s_block_defs[block_id]; + struct dbg_bus_block *block_desc; + u32 block_dwords, addr, len; + u8 dbg_client_id; - if (!p_block_defs->has_dbg_bus[dev_data->chip_id]) + if (!block->has_dbg_bus[dev_data->chip_id]) continue; + block_desc = + get_dbg_bus_block_desc(p_hwfn, + (enum block_id)block_id); + block_dwords = NUM_DBG_LINES(block_desc) * + STATIC_DEBUG_LINE_DWORDS; + /* Dump static section params */ offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + 
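Each PHY's test bus is swept with a split address: the high byte is written once per 256-entry stride, and for every low byte two data bytes (low, then high) are read back, so the dump grows by two bytes per tbus address. A compact model with a toy register file standing in for qed_wr()/qed_rd():

#include <stdint.h>

static uint32_t toy_regs[65536];		/* toy register file */
static void reg_wr(uint32_t a, uint32_t v) { toy_regs[a % 65536] = v; }
static uint32_t reg_rd(uint32_t a) { return toy_regs[a % 65536]; }

static void dump_phy_tbus(uint8_t *out, uint32_t addr_lo_reg,
			  uint32_t addr_hi_reg, uint32_t data_lo_reg,
			  uint32_t data_hi_reg, uint32_t num_addresses)
{
	for (uint32_t hi = 0; hi < (num_addresses >> 8); hi++) {
		reg_wr(addr_hi_reg, hi);
		for (uint32_t lo = 0; lo < 256; lo++) {
			reg_wr(addr_lo_reg, lo);
			*out++ = (uint8_t)reg_rd(data_lo_reg);
			*out++ = (uint8_t)reg_rd(data_hi_reg);
		}
	}
}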
offset, dump, - p_block_defs->name, 0, - block_dwords, 32, false, - "STATIC", false, 0); - - if (dump && !dev_data->block_in_reset[block_id]) { - u8 dbg_client_id = - p_block_defs->dbg_client_id[dev_data->chip_id]; - u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); - u32 len = STATIC_DEBUG_LINE_DWORDS; - - /* Enable block's client */ - qed_bus_enable_clients(p_hwfn, p_ptt, - BIT(dbg_client_id)); - - for (line_id = 0; line_id < NUM_DBG_BUS_LINES; - line_id++) { - /* Configure debug line ID */ - qed_config_dbg_line(p_hwfn, - p_ptt, - (enum block_id)block_id, - (u8)line_id, - 0xf, 0, 0, 0); + block->name, + 0, + block_dwords, + 32, false, "STATIC", false, 0); - /* Read debug line info */ - offset += - qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - len); - } + if (!dump) { + offset += block_dwords; + continue; + } - /* Disable block's client and debug output */ - qed_bus_enable_clients(p_hwfn, p_ptt, 0); - qed_wr(p_hwfn, p_ptt, - p_block_defs->dbg_cycle_enable_addr, 0); - } else { - /* All lines are invalid - dump zeros */ - if (dump) - memset(dump_buf + offset, 0, - DWORDS_TO_BYTES(block_dwords)); + /* If all lines are invalid - dump zeros */ + if (dev_data->block_in_reset[block_id]) { + memset(dump_buf + offset, 0, + DWORDS_TO_BYTES(block_dwords)); offset += block_dwords; + continue; + } + + /* Enable block's client */ + dbg_client_id = block->dbg_client_id[dev_data->chip_id]; + qed_bus_enable_clients(p_hwfn, + p_ptt, + BIT(dbg_client_id)); + + addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); + len = STATIC_DEBUG_LINE_DWORDS; + for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); + line_id++) { + /* Configure debug line ID */ + qed_config_dbg_line(p_hwfn, + p_ptt, + (enum block_id)block_id, + (u8)line_id, 0xf, 0, 0, 0); + + /* Read debug line info */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + true); } + + /* Disable block's client and debug output */ + qed_bus_enable_clients(p_hwfn, p_ptt, 0); + qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0); } if (dump) { @@ -3584,8 +3896,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, *num_dumped_dwords = 0; - /* Find port mode */ if (dump) { + /* Find port mode */ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { case 0: port_mode = 1; @@ -3597,11 +3909,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, port_mode = 4; break; } - } - /* Update reset state */ - if (dump) + /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); + } /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -3635,7 +3946,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, } /* Disable all parities using MFW command */ - if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { + if (dump && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); if (!parities_masked) { DP_NOTICE(p_hwfn, @@ -3661,9 +3973,9 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, /* Dump all regs */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) { - /* Dump all blocks except MCP */ bool block_enable[MAX_BLOCK_ID]; + /* Dump all blocks except MCP */ for (i = 0; i < MAX_BLOCK_ID; i++) block_enable[i] = true; block_enable[BLOCK_MCP] = false; @@ -3732,7 +4044,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump); /* Dump last section */ - offset += qed_dump_last_section(dump_buf, offset, dump); 
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + if (dump) { /* Unstall storms */ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL)) @@ -3763,19 +4076,20 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, const struct dbg_idle_chk_rule *rule, u16 fail_entry_id, u32 *cond_reg_values) { - const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *) - s_dbg_arrays - [BIN_BUF_DBG_IDLE_CHK_REGS]. - ptr)[rule->reg_offset]; - const struct dbg_idle_chk_cond_reg *cond_regs = ®s[0].cond_reg; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct dbg_idle_chk_result_hdr *hdr = - (struct dbg_idle_chk_result_hdr *)dump_buf; - const struct dbg_idle_chk_info_reg *info_regs = - ®s[rule->num_cond_regs].info_reg; - u32 next_reg_offset = 0, i, offset = 0; + const struct dbg_idle_chk_cond_reg *cond_regs; + const struct dbg_idle_chk_info_reg *info_regs; + u32 i, next_reg_offset = 0, offset = 0; + struct dbg_idle_chk_result_hdr *hdr; + const union dbg_idle_chk_reg *regs; u8 reg_id; + hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; + regs = &((const union dbg_idle_chk_reg *) + s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset]; + cond_regs = ®s[0].cond_reg; + info_regs = ®s[rule->num_cond_regs].info_reg; + /* Dump rule data */ if (dump) { memset(hdr, 0, sizeof(*hdr)); @@ -3790,33 +4104,31 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, /* Dump condition register values */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; + struct dbg_idle_chk_result_reg_hdr *reg_hdr; - /* Write register header */ - if (dump) { - struct dbg_idle_chk_result_reg_hdr *reg_hdr = - (struct dbg_idle_chk_result_reg_hdr *)(dump_buf - + offset); - offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; - memset(reg_hdr, 0, - sizeof(struct dbg_idle_chk_result_reg_hdr)); - reg_hdr->start_entry = reg->start_entry; - reg_hdr->size = reg->entry_size; - SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, - reg->num_entries > 1 || reg->start_entry > 0 - ? 1 : 0); - SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); + reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) + (dump_buf + offset); - /* Write register values */ - for (i = 0; i < reg_hdr->size; - i++, next_reg_offset++, offset++) - dump_buf[offset] = - cond_reg_values[next_reg_offset]; - } else { + /* Write register header */ + if (!dump) { offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size; + continue; } + + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; + memset(reg_hdr, 0, sizeof(*reg_hdr)); + reg_hdr->start_entry = reg->start_entry; + reg_hdr->size = reg->entry_size; + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, + reg->num_entries > 1 || reg->start_entry > 0 ? 
1 : 0); + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); + + /* Write register values */ + for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++) + dump_buf[offset] = cond_reg_values[next_reg_offset]; } /* Dump info register values */ @@ -3824,12 +4136,12 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id]; u32 block_id; + /* Check if register's block is in reset */ if (!dump) { offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size; continue; } - /* Check if register's block is in reset */ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID); if (block_id >= MAX_BLOCK_ID) { DP_NOTICE(p_hwfn, "Invalid block_id\n"); @@ -3837,47 +4149,50 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, } if (!dev_data->block_in_reset[block_id]) { - bool eval_mode = GET_FIELD(reg->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; - bool mode_match = true; + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + bool wide_bus, eval_mode, mode_match = true; + u16 modes_buf_offset; + u32 addr; + + reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) + (dump_buf + offset); /* Check mode */ + eval_mode = GET_FIELD(reg->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = - GET_FIELD(reg->mode.data, - DBG_MODE_HDR_MODES_BUF_OFFSET); + modes_buf_offset = + GET_FIELD(reg->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } - if (mode_match) { - u32 addr = - GET_FIELD(reg->data, - DBG_IDLE_CHK_INFO_REG_ADDRESS); - - /* Write register header */ - struct dbg_idle_chk_result_reg_hdr *reg_hdr = - (struct dbg_idle_chk_result_reg_hdr *) - (dump_buf + offset); - - offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; - hdr->num_dumped_info_regs++; - memset(reg_hdr, 0, sizeof(*reg_hdr)); - reg_hdr->size = reg->size; - SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, - rule->num_cond_regs + reg_id); - - /* Write register values */ - offset += - qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - addr, - reg->size); - } + if (!mode_match) + continue; + + addr = GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_ADDRESS); + wide_bus = GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_WIDE_BUS); + + /* Write register header */ + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; + hdr->num_dumped_info_regs++; + memset(reg_hdr, 0, sizeof(*reg_hdr)); + reg_hdr->size = reg->size; + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, + rule->num_cond_regs + reg_id); + + /* Write register values */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + reg->size, wide_bus); } } @@ -3898,6 +4213,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 reg_id; *num_failing_rules = 0; + for (i = 0; i < num_input_rules; i++) { const struct dbg_idle_chk_cond_reg *cond_regs; const struct dbg_idle_chk_rule *rule; @@ -3920,8 +4236,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, */ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) { - u32 block_id = GET_FIELD(cond_regs[reg_id].data, - DBG_IDLE_CHK_COND_REG_BLOCK_ID); + u32 block_id = + GET_FIELD(cond_regs[reg_id].data, + DBG_IDLE_CHK_COND_REG_BLOCK_ID); if (block_id >= MAX_BLOCK_ID) { DP_NOTICE(p_hwfn, "Invalid block_id\n"); @@ -3936,48 +4253,47 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (!check_rule && dump) continue; - 
if (!dump) { - u32 entry_dump_size = - qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - false, - rule->rule_id, - rule, - 0, - NULL); - - offset += num_reg_entries * entry_dump_size; - (*num_failing_rules) += num_reg_entries; - continue; - } - /* Go over all register entries (number of entries is the same * for all condition registers). */ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { - /* Read current entry of all condition registers */ u32 next_reg_offset = 0; + if (!dump) { + offset += qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + false, + rule->rule_id, + rule, + entry_id, + NULL); + (*num_failing_rules)++; + break; + } + + /* Read current entry of all condition registers */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = - &cond_regs[reg_id]; + &cond_regs[reg_id]; + u32 padded_entry_size, addr; + bool wide_bus; - /* Find GRC address (if it's a memory,the + /* Find GRC address (if it's a memory, the * address of the specific entry is calculated). */ - u32 addr = + addr = GET_FIELD(reg->data, + DBG_IDLE_CHK_COND_REG_ADDRESS); + wide_bus = GET_FIELD(reg->data, - DBG_IDLE_CHK_COND_REG_ADDRESS); - + DBG_IDLE_CHK_COND_REG_WIDE_BUS); if (reg->num_entries > 1 || reg->start_entry > 0) { - u32 padded_entry_size = - reg->entry_size > 1 ? - roundup_pow_of_two(reg->entry_size) : - 1; - + padded_entry_size = + reg->entry_size > 1 ? + roundup_pow_of_two(reg->entry_size) + : 1; addr += (reg->start_entry + entry_id) * padded_entry_size; } @@ -3991,28 +4307,27 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } next_reg_offset += - qed_grc_dump_addr_range(p_hwfn, - p_ptt, + qed_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, - reg->entry_size); + reg->entry_size, + wide_bus); } - /* Call rule's condition function - a return value of - * true indicates failure. + /* Call rule condition function. + * If it returns true, it's a failure. 
*/ - if ((*cond_arr[rule->cond_id])(cond_reg_values, - imm_values)) { - offset += - qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - rule->rule_id, - rule, - entry_id, - cond_reg_values); + if ((*cond_arr[rule->cond_id]) (cond_reg_values, + imm_values)) { + offset += qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + rule->rule_id, + rule, + entry_id, + cond_reg_values); (*num_failing_rules)++; break; } @@ -4028,8 +4343,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - u32 offset = 0, input_offset = 0, num_failing_rules = 0; - u32 num_failing_rules_offset; + u32 num_failing_rules_offset, offset = 0, input_offset = 0; + u32 num_failing_rules = 0; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -4042,29 +4357,29 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1); num_failing_rules_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0); + while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) { const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *) &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr [input_offset++]; - bool eval_mode = GET_FIELD(cond_hdr->mode.data, - DBG_MODE_HDR_EVAL_MODE) > 0; - bool mode_match = true; + bool eval_mode, mode_match = true; + u32 curr_failing_rules; + u16 modes_buf_offset; /* Check mode */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { - u16 modes_buf_offset = + modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); - mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } if (mode_match) { - u32 curr_failing_rules; - offset += qed_idle_chk_dump_rule_entries(p_hwfn, p_ptt, @@ -4086,10 +4401,13 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, qed_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules); + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + return offset; } -/* Finds the meta data image in NVRAM. 
*/ +/* Finds the meta data image in NVRAM */ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 image_type, @@ -4098,16 +4416,16 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, { u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; struct mcp_file_att file_att; + int nvm_result; /* Call NVRAM get file command */ - int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, - p_ptt, - DRV_MSG_CODE_NVM_GET_FILE_ATT, - image_type, - &ret_mcp_resp, - &ret_mcp_param, - &ret_txn_size, - (u32 *)&file_att); + nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, (u32 *)&file_att); /* Check response */ if (nvm_result || @@ -4117,6 +4435,7 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, /* Update return values */ *nvram_offset_bytes = file_att.nvm_start_addr; *nvram_size_bytes = file_att.len; + DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", @@ -4125,22 +4444,25 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, /* Check alignment */ if (*nvram_size_bytes & 0x3) return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; + return DBG_STATUS_OK; } +/* Reads data from NVRAM */ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_bytes, u32 nvram_size_bytes, u32 *ret_buf) { - u32 ret_mcp_resp, ret_mcp_param, ret_read_size; - u32 bytes_to_copy, read_offset = 0; + u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; s32 bytes_left = nvram_size_bytes; + u32 read_offset = 0; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes); + do { bytes_to_copy = (bytes_left > @@ -4155,8 +4477,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, DRV_MB_PARAM_NVM_LEN_SHIFT), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, - (u32 *)((u8 *)ret_buf + - read_offset)) != 0) + (u32 *)((u8 *)ret_buf + read_offset))) return DBG_STATUS_NVRAM_READ_FAILED; /* Check response */ @@ -4172,24 +4493,20 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, } /* Get info on the MCP Trace data in the scratchpad: - * - trace_data_grc_addr - the GRC address of the trace data - * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without - * the header) + * - trace_data_grc_addr (OUT): trace data GRC address in bytes + * - trace_data_size (OUT): trace data size in bytes (without the header) */ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *trace_data_grc_addr, - u32 *trace_data_size_bytes) + u32 *trace_data_size) { - /* Read MCP trace section offsize structure from MCP scratchpad */ - u32 spad_trace_offsize = qed_rd(p_hwfn, - p_ptt, - MCP_SPAD_TRACE_OFFSIZE_ADDR); - u32 signature; + u32 spad_trace_offsize, signature; - /* Extract MCP trace section GRC address from offsize structure (within - * scratchpad). 
- */ + /* Read trace section offsize structure from MCP scratchpad */ + spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); + + /* Extract trace section address from offsize (in scratchpad) */ *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize); @@ -4197,42 +4514,41 @@ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, signature = qed_rd(p_hwfn, p_ptt, *trace_data_grc_addr + offsetof(struct mcp_trace, signature)); + if (signature != MFW_TRACE_SIGNATURE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read trace size from MCP trace section */ - *trace_data_size_bytes = qed_rd(p_hwfn, - p_ptt, - *trace_data_grc_addr + - offsetof(struct mcp_trace, size)); + *trace_data_size = qed_rd(p_hwfn, + p_ptt, + *trace_data_grc_addr + + offsetof(struct mcp_trace, size)); + return DBG_STATUS_OK; } -/* Reads MCP trace meta data image from NVRAM. - * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from - * file) - * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP - * Trace meta data starts (invalid when loaded from file) - * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data +/* Reads MCP trace meta data image from NVRAM + * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file) + * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when + * loaded from file). + * - trace_meta_size (OUT): size in bytes of the trace meta data. */ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 trace_data_size_bytes, u32 *running_bundle_id, - u32 *trace_meta_offset_bytes, - u32 *trace_meta_size_bytes) + u32 *trace_meta_offset, + u32 *trace_meta_size) { + u32 spad_trace_offsize, nvram_image_type, running_mfw_addr; + /* Read MCP trace section offsize structure from MCP scratchpad */ - u32 spad_trace_offsize = qed_rd(p_hwfn, - p_ptt, - MCP_SPAD_TRACE_OFFSIZE_ADDR); + spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); /* Find running bundle ID */ - u32 running_mfw_addr = + running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; - u32 nvram_image_type; - *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); if (*running_bundle_id > 1) return DBG_STATUS_INVALID_NVRAM_BUNDLE; @@ -4241,40 +4557,33 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; - return qed_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, - trace_meta_offset_bytes, - trace_meta_size_bytes); + trace_meta_offset, trace_meta_size); } -/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified - * buffer. 
- */ +/* Reads the MCP Trace meta data from NVRAM into the specified buffer */ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_in_bytes, u32 size_in_bytes, u32 *buf) { - u8 *byte_buf = (u8 *)buf; - u8 modules_num, i; + u8 modules_num, module_len, i, *byte_buf = (u8 *)buf; + enum dbg_status status; u32 signature; /* Read meta data from NVRAM */ - enum dbg_status status = qed_nvram_read(p_hwfn, - p_ptt, - nvram_offset_in_bytes, - size_in_bytes, - buf); - + status = qed_nvram_read(p_hwfn, + p_ptt, + nvram_offset_in_bytes, size_in_bytes, buf); if (status != DBG_STATUS_OK) return status; /* Extract and check first signature */ signature = qed_read_unaligned_dword(byte_buf); - byte_buf += sizeof(u32); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + byte_buf += sizeof(signature); + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Extract number of modules */ @@ -4282,16 +4591,16 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, /* Skip all modules */ for (i = 0; i < modules_num; i++) { - u8 module_len = *(byte_buf++); - + module_len = *(byte_buf++); byte_buf += module_len; } /* Extract and check second signature */ signature = qed_read_unaligned_dword(byte_buf); - byte_buf += sizeof(u32); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + byte_buf += sizeof(signature); + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; + return DBG_STATUS_OK; } @@ -4308,10 +4617,10 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, bool mcp_access; int halted = 0; - mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); - *num_dumped_dwords = 0; + + mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); + /* Get trace data info */ status = qed_mcp_trace_get_data_info(p_hwfn, p_ptt, @@ -4328,7 +4637,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump, "dump-type", "mcp-trace"); /* Halt MCP while reading from scratchpad so the read data will be - * consistent if halt fails, MCP trace is taken anyway, with a small + * consistent. If halt fails, MCP trace is taken anyway, with a small * risk that it may be corrupt. 
*/ if (dump && mcp_access) { @@ -4339,8 +4648,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Find trace data size */ trace_data_size_dwords = - DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), - BYTES_IN_DWORD); + DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), + BYTES_IN_DWORD); /* Dump trace data section header and param */ offset += qed_dump_section_hdr(dump_buf + offset, @@ -4354,17 +4663,17 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), - trace_data_size_dwords); + trace_data_size_dwords, false); /* Resume MCP (only if halt succeeded) */ - if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0) + if (halted && qed_mcp_resume(p_hwfn, p_ptt)) DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); /* Dump trace meta section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1); - /* Read trace meta info */ + /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */ if (mcp_access) { status = qed_mcp_trace_get_meta_info(p_hwfn, p_ptt, @@ -4391,6 +4700,9 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, if (status == DBG_STATUS_OK) offset += trace_meta_size_dwords; + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + *num_dumped_dwords = offset; /* If no mcp access, indicate that the dump doesn't contain the meta @@ -4405,7 +4717,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 offset = 0, dwords_read, size_param_offset; + u32 dwords_read, size_param_offset, offset = 0; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4417,8 +4729,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo"); - /* Dump fifo data section header and param. The size param is 0 for now, - * and is overwritten after reading the FIFO. + /* Dump fifo data section header and param. The size param is 0 for + * now, and is overwritten after reading the FIFO. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1); @@ -4430,8 +4742,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, * test how much data is available, except for reading it. */ offset += REG_FIFO_DEPTH_DWORDS; - *num_dumped_dwords = offset; - return DBG_STATUS_OK; + goto out; } fifo_has_data = qed_rd(p_hwfn, p_ptt, @@ -4456,8 +4767,12 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, qed_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read); +out: + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); *num_dumped_dwords = offset; + return DBG_STATUS_OK; } @@ -4467,7 +4782,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 offset = 0, dwords_read, size_param_offset; + u32 dwords_read, size_param_offset, offset = 0; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4479,8 +4794,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo"); - /* Dump fifo data section header and param. The size param is 0 for now, - * and is overwritten after reading the FIFO. + /* Dump fifo data section header and param. 
The size param is 0 for + * now, and is overwritten after reading the FIFO. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1); @@ -4492,8 +4807,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, * test how much data is available, except for reading it. */ offset += IGU_FIFO_DEPTH_DWORDS; - *num_dumped_dwords = offset; - return DBG_STATUS_OK; + goto out; } fifo_has_data = qed_rd(p_hwfn, p_ptt, @@ -4519,8 +4833,12 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, qed_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read); +out: + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); *num_dumped_dwords = offset; + return DBG_STATUS_OK; } @@ -4531,7 +4849,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { - u32 offset = 0, size_param_offset, override_window_dwords; + u32 size_param_offset, override_window_dwords, offset = 0; *num_dumped_dwords = 0; @@ -4542,8 +4860,8 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override"); - /* Dump data section header and param. The size param is 0 for now, and - * is overwritten after reading the data. + /* Dump data section header and param. The size param is 0 for now, + * and is overwritten after reading the data. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1); @@ -4552,8 +4870,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, if (!dump) { offset += PROTECTION_OVERRIDE_DEPTH_DWORDS; - *num_dumped_dwords = offset; - return DBG_STATUS_OK; + goto out; } /* Add override window info to buffer */ @@ -4569,8 +4886,12 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, offset += override_window_dwords; qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); +out: + /* Dump last section */ + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); *num_dumped_dwords = offset; + return DBG_STATUS_OK; } @@ -4593,11 +4914,14 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts"); + + /* Find Storm dump size */ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx; + struct storm_defs *storm = &s_storm_defs[storm_id]; u32 last_list_idx, addr; - if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id]) + if (dev_data->block_in_reset[storm->block_id]) continue; /* Read FW info for the current Storm */ @@ -4606,26 +4930,26 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, asserts = &fw_info.fw_asserts_section; /* Dump FW Asserts section header and params */ - storm_letter_str[0] = s_storm_defs[storm_id].letter; - offset += qed_dump_section_hdr(dump_buf + offset, dump, - "fw_asserts", 2); - offset += qed_dump_str_param(dump_buf + offset, dump, "storm", - storm_letter_str); - offset += qed_dump_num_param(dump_buf + offset, dump, "size", + storm_letter_str[0] = storm->letter; + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "fw_asserts", 2); + offset += qed_dump_str_param(dump_buf + offset, + dump, "storm", storm_letter_str); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "size", asserts->list_element_dword_size); + /* 
Read and dump FW Asserts data */ if (!dump) { offset += asserts->list_element_dword_size; continue; } - /* Read and dump FW Asserts data */ - fw_asserts_section_addr = - s_storm_defs[storm_id].sem_fast_mem_addr + + fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + RAM_LINES_TO_BYTES(asserts->section_ram_line_offset); - next_list_idx_addr = - fw_asserts_section_addr + + next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); last_list_idx = (next_list_idx > 0 @@ -4638,11 +4962,13 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, - asserts->list_element_dword_size); + asserts->list_element_dword_size, + false); } /* Dump last section */ - offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); + offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + return offset; } @@ -4650,10 +4976,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) { - /* Convert binary data to debug arrays */ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; + /* convert binary data to debug arrays */ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset); @@ -4682,14 +5008,17 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; + return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4702,12 +5031,14 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_grc_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4724,25 +5055,31 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct idle_chk_data *idle_chk; + enum dbg_status status; + idle_chk = &dev_data->idle_chk; *buf_size = 0; + + status = qed_dbg_dev_init(p_hwfn, p_ptt); if (status != DBG_STATUS_OK) return status; + if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; - if (!dev_data->idle_chk.buf_size_set) { - dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn, - p_ptt, - NULL, false); - dev_data->idle_chk.buf_size_set = true; + + if (!idle_chk->buf_size_set) { + idle_chk->buf_size = qed_idle_chk_dump(p_hwfn, + p_ptt, NULL, false); + idle_chk->buf_size_set = true; } - *buf_size = dev_data->idle_chk.buf_size; + *buf_size = idle_chk->buf_size; + return 
DBG_STATUS_OK; } @@ -4755,12 +5092,14 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4783,8 +5122,10 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4797,13 +5138,12 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - /* validate buffer size */ status = - qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - - if (status != DBG_STATUS_OK && - status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) + qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK && status != + DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) @@ -4829,8 +5169,10 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4843,12 +5185,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4871,8 +5215,10 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4885,12 +5231,14 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4913,8 +5261,10 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; + return qed_protection_override_dump(p_hwfn, p_ptt, NULL, false, buf_size); } @@ -4925,15 +5275,18 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; + u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; enum dbg_status status; - status = 
qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = + qed_dbg_protection_override_get_dump_buf_size(p_hwfn, + p_ptt, + p_size); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4958,12 +5311,15 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); *buf_size = 0; + if (status != DBG_STATUS_OK) return status; /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); + *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false); + return DBG_STATUS_OK; } @@ -4973,19 +5329,26 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; + u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, - &needed_buf_size_in_dwords); - *num_dumped_dwords = 0; + + status = + qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, + p_ptt, + p_size); if (status != DBG_STATUS_OK) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + return DBG_STATUS_OK; } @@ -5005,9 +5368,14 @@ struct mcp_trace_format { #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 #define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; }; +/* Meta data structure, generated by a perl script during MFW build. Therefore, + * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl + * script. 
+ */ struct mcp_trace_meta { u32 modules_num; char **modules; @@ -5015,7 +5383,7 @@ struct mcp_trace_meta { struct mcp_trace_format *formats; }; -/* Reg fifo element */ +/* REG fifo element */ struct reg_fifo_element { u64 data; #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0 @@ -5140,12 +5508,15 @@ struct igu_fifo_addr_data { /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 + #define MCP_TRACE_MAX_MODULE_LEN 8 #define MCP_TRACE_FORMAT_MAX_PARAMS 3 #define MCP_TRACE_FORMAT_PARAM_WIDTH \ (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT) + #define REG_FIFO_ELEMENT_ADDR_FACTOR 4 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127 + #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 /********************************* Macros ************************************/ @@ -5154,59 +5525,178 @@ struct igu_fifo_addr_data { /***************************** Constant Arrays *******************************/ +struct user_dbg_array { + const u32 *ptr; + u32 size_in_dwords; +}; + +/* Debug arrays */ +static struct user_dbg_array +s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; + /* Status string array */ static const char * const s_status_str[] = { + /* DBG_STATUS_OK */ "Operation completed successfully", + + /* DBG_STATUS_APP_VERSION_NOT_SET */ "Debug application version wasn't set", + + /* DBG_STATUS_UNSUPPORTED_APP_VERSION */ "Unsupported debug application version", + + /* DBG_STATUS_DBG_BLOCK_NOT_RESET */ "The debug block wasn't reset since the last recording", + + /* DBG_STATUS_INVALID_ARGS */ "Invalid arguments", + + /* DBG_STATUS_OUTPUT_ALREADY_SET */ "The debug output was already set", + + /* DBG_STATUS_INVALID_PCI_BUF_SIZE */ "Invalid PCI buffer size", + + /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */ "PCI buffer allocation failed", + + /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */ "A PCI buffer wasn't allocated", + + /* DBG_STATUS_TOO_MANY_INPUTS */ "Too many inputs were enabled. 
Enabled less inputs, or set 'unifyInputs' to true", - "GRC/Timestamp input overlap in cycle dword 0", + + /* DBG_STATUS_INPUT_OVERLAP */ + "Overlapping debug bus inputs", + + /* DBG_STATUS_HW_ONLY_RECORDING */ "Cannot record Storm data since the entire recording cycle is used by HW", + + /* DBG_STATUS_STORM_ALREADY_ENABLED */ "The Storm was already enabled", + + /* DBG_STATUS_STORM_NOT_ENABLED */ "The specified Storm wasn't enabled", + + /* DBG_STATUS_BLOCK_ALREADY_ENABLED */ "The block was already enabled", + + /* DBG_STATUS_BLOCK_NOT_ENABLED */ "The specified block wasn't enabled", + + /* DBG_STATUS_NO_INPUT_ENABLED */ "No input was enabled for recording", + + /* DBG_STATUS_NO_FILTER_TRIGGER_64B */ "Filters and triggers are not allowed when recording in 64b units", + + /* DBG_STATUS_FILTER_ALREADY_ENABLED */ "The filter was already enabled", + + /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */ "The trigger was already enabled", + + /* DBG_STATUS_TRIGGER_NOT_ENABLED */ "The trigger wasn't enabled", + + /* DBG_STATUS_CANT_ADD_CONSTRAINT */ "A constraint can be added only after a filter was enabled or a trigger state was added", + + /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */ "Cannot add more than 3 trigger states", + + /* DBG_STATUS_TOO_MANY_CONSTRAINTS */ "Cannot add more than 4 constraints per filter or trigger state", + + /* DBG_STATUS_RECORDING_NOT_STARTED */ "The recording wasn't started", + + /* DBG_STATUS_DATA_DIDNT_TRIGGER */ "A trigger was configured, but it didn't trigger", + + /* DBG_STATUS_NO_DATA_RECORDED */ "No data was recorded", + + /* DBG_STATUS_DUMP_BUF_TOO_SMALL */ "Dump buffer is too small", + + /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */ "Dumped data is not aligned to chunks", + + /* DBG_STATUS_UNKNOWN_CHIP */ "Unknown chip", + + /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */ "Failed allocating virtual memory", + + /* DBG_STATUS_BLOCK_IN_RESET */ "The input block is in reset", + + /* DBG_STATUS_INVALID_TRACE_SIGNATURE */ "Invalid MCP trace signature found in NVRAM", + + /* DBG_STATUS_INVALID_NVRAM_BUNDLE */ "Invalid bundle ID found in NVRAM", + + /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */ "Failed getting NVRAM image", + + /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */ "NVRAM image is not dword-aligned", + + /* DBG_STATUS_NVRAM_READ_FAILED */ "Failed reading from NVRAM", + + /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */ "Idle check parsing failed", + + /* DBG_STATUS_MCP_TRACE_BAD_DATA */ "MCP Trace data is corrupt", - "Dump doesn't contain meta data - it must be provided in an image file", + + /* DBG_STATUS_MCP_TRACE_NO_META */ + "Dump doesn't contain meta data - it must be provided in image file", + + /* DBG_STATUS_MCP_COULD_NOT_HALT */ "Failed to halt MCP", + + /* DBG_STATUS_MCP_COULD_NOT_RESUME */ "Failed to resume MCP after halt", + + /* DBG_STATUS_DMAE_FAILED */ "DMAE transaction failed", + + /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ "Failed to empty SEMI sync FIFO", + + /* DBG_STATUS_IGU_FIFO_BAD_DATA */ "IGU FIFO data is corrupt", + + /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */ "MCP failed to mask parities", + + /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */ "FW Asserts parsing failed", + + /* DBG_STATUS_REG_FIFO_BAD_DATA */ "GRC FIFO data is corrupt", + + /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */ "Protection Override data is corrupt", + + /* DBG_STATUS_DBG_ARRAY_NOT_SET */ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)", - "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)" + + /* DBG_STATUS_FILTER_BUG */ + "Debug Bus 
filtering requires the -unifyInputs option (due to a HW bug)", + + /* DBG_STATUS_NON_MATCHING_LINES */ + "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)", + + /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */ + "The selected trigger dword offset wasn't enabled in the recorded HW block", + + /* DBG_STATUS_DBG_BUS_IN_USE */ + "The debug bus is in use" }; /* Idle check severity names array */ @@ -5223,12 +5713,13 @@ static const char * const s_mcp_trace_level_str[] = { "DEBUG" }; -/* Parsing strings */ +/* Access type names array */ static const char * const s_access_strs[] = { "read", "write" }; +/* Privilege type names array */ static const char * const s_privilege_strs[] = { "VF", "PDA", @@ -5236,6 +5727,7 @@ static const char * const s_privilege_strs[] = { "UA" }; +/* Protection type names array */ static const char * const s_protection_strs[] = { "(default)", "(default)", @@ -5247,6 +5739,7 @@ static const char * const s_protection_strs[] = { "override UA" }; +/* Master type names array */ static const char * const s_master_strs[] = { "???", "pxp", @@ -5266,6 +5759,7 @@ static const char * const s_master_strs[] = { "???" }; +/* REG FIFO error messages array */ static const char * const s_reg_fifo_error_strs[] = { "grc timeout", "address doesn't belong to any block", @@ -5274,6 +5768,7 @@ static const char * const s_reg_fifo_error_strs[] = { "path isolation error" }; +/* IGU FIFO sources array */ static const char * const s_igu_fifo_source_strs[] = { "TSTORM", "MSTORM", @@ -5288,6 +5783,7 @@ static const char * const s_igu_fifo_source_strs[] = { "GRC", }; +/* IGU FIFO error messages */ static const char * const s_igu_fifo_error_strs[] = { "no error", "length error", @@ -5308,13 +5804,18 @@ static const char * const s_igu_fifo_error_strs[] = { /* IGU FIFO address data */ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { - {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM}, - {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, - {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA}, + {0x0, 0x101, "MSI-X Memory", NULL, + IGU_ADDR_TYPE_MSIX_MEM}, + {0x102, 0x1ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x200, 0x200, "Write PBA[0:63]", NULL, + IGU_ADDR_TYPE_WRITE_PBA}, {0x201, 0x201, "Write PBA[64:127]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, - {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, - {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, + {0x202, 0x202, "Write PBA[128]", "reserved", + IGU_ADDR_TYPE_WRITE_PBA}, + {0x203, 0x3ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, {0x400, 0x5ef, "Write interrupt acknowledgment", NULL, IGU_ADDR_TYPE_WRITE_INT_ACK}, {0x5f0, 0x5f0, "Attention bits update", NULL, @@ -5331,8 +5832,10 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { IGU_ADDR_TYPE_READ_INT}, {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL, IGU_ADDR_TYPE_READ_INT}, - {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, - {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE} + {0x5f7, 0x5ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x600, 0x7ff, "Producer update", NULL, + IGU_ADDR_TYPE_WRITE_PROD_UPDATE} }; /******************************** Variables **********************************/ @@ -5340,28 +5843,12 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { /* MCP Trace meta data - used in case the dump doesn't contain the meta data * (e.g. due to no NVRAM access). 
*/ -static struct dbg_array s_mcp_trace_meta = { NULL, 0 }; +static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 }; /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; -/***************************** Public Functions *******************************/ - -enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) -{ - /* Convert binary data to debug arrays */ - struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; - u8 buf_id; - - for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { - s_dbg_arrays[buf_id].ptr = - (u32 *)(bin_ptr + buf_array[buf_id].offset); - s_dbg_arrays[buf_id].size_in_dwords = - BYTES_TO_DWORDS(buf_array[buf_id].length); - } - - return DBG_STATUS_OK; -} +/**************************** Private Functions ******************************/ static u32 qed_cyclic_add(u32 a, u32 b, u32 size) { @@ -5381,10 +5868,8 @@ static u32 qed_read_from_cyclic_buf(void *buf, u32 *offset, u32 buf_size, u8 num_bytes_to_read) { - u8 *bytes_buf = (u8 *)buf; - u8 *val_ptr; + u8 i, *val_ptr, *bytes_buf = (u8 *)buf; u32 val = 0; - u8 i; val_ptr = (u8 *)&val; @@ -5412,6 +5897,7 @@ static u32 qed_read_dword_from_buf(void *buf, u32 *offset) u32 dword_val = *(u32 *)&((u8 *)buf)[*offset]; *offset += 4; + return dword_val; } @@ -5445,7 +5931,7 @@ static u32 qed_read_param(u32 *dump_buf, const char **param_str_val, u32 *param_num_val) { char *char_buf = (char *)dump_buf; - u32 offset = 0; /* In bytes */ + size_t offset = 0; /* Extract param name */ *param_name = char_buf; @@ -5493,37 +5979,31 @@ static u32 qed_print_section_params(u32 *dump_buf, u32 i, dump_offset = 0, results_offset = 0; for (i = 0; i < num_section_params; i++) { - const char *param_name; - const char *param_str_val; + const char *param_name, *param_str_val; u32 param_num_val = 0; dump_offset += qed_read_param(dump_buf + dump_offset, ¶m_name, ¶m_str_val, ¶m_num_val); + if (param_str_val) - /* String param */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: %s\n", param_name, param_str_val); else if (strcmp(param_name, "fw-timestamp")) - /* Numeric param */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: %d\n", param_name, param_num_val); } - results_offset += - sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); + results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), + "\n"); + *num_chars_printed = results_offset; - return dump_offset; -} -const char *qed_dbg_get_status_str(enum dbg_status status) -{ - return (status < - MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status"; + return dump_offset; } /* Parses the idle check rules and returns the number of characters printed. 
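The parsing routines in the hunks that follow all share one sizing trick: each can be called with a NULL results_buf, in which case qed_get_buf_ptr() appears to hand every sprintf() the static s_temp_buf scratch line declared above while the running byte offset still advances, so a first pass yields exactly the buffer size the caller must allocate before the second, real pass. A minimal user-space sketch of that pattern (the helper names echo the driver's, but qed_get_buf_ptr()'s body is not shown in this patch, so its behavior here is an assumption):

#include <stdio.h>

#define MAX_MSG_LEN 1024

/* Scratch line used when only the required size is being computed. */
static char temp_buf[MAX_MSG_LEN];

/* Where sprintf() should write: the real buffer at 'offset', or the
 * scratch line when no output buffer was supplied (size-only pass).
 */
static char *get_buf_ptr(char *buf, size_t offset)
{
        return buf ? buf + offset : temp_buf;
}

/* Formats one parameter; returns the updated offset either way. */
static size_t emit(char *buf, size_t offset, const char *name, int val)
{
        return offset + sprintf(get_buf_ptr(buf, offset), "%s: %d\n",
                                name, val);
}

int main(void)
{
        /* Pass 1: NULL buffer; offsets still advance -> required size. */
        size_t need = emit(NULL, 0, "num_rules", 3) + 1; /* +1 for '\0' */
        char out[128] = "";

        /* Pass 2: identical calls against a correctly sized buffer. */
        if (need <= sizeof(out))
                emit(out, 0, "num_rules", 3);
        printf("need=%zu text=%s", need, out);
        return 0;
}

This two-pass shape is why each qed_get_*_results_buf_size() wrapper seen in these hunks simply calls the same parser without a buffer that its qed_print_*_results() twin calls with one.
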
@@ -5537,7 +6017,10 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, char *results_buf, u32 *num_errors, u32 *num_warnings) { - u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */ + /* Offset in results_buf in bytes */ + u32 results_offset = 0; + + u32 rule_idx; u16 i, j; *num_errors = 0; @@ -5548,16 +6031,15 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, rule_idx++) { const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data; struct dbg_idle_chk_result_hdr *hdr; - const char *parsing_str; + const char *parsing_str, *lsi_msg; u32 parsing_str_offset; - const char *lsi_msg; - u8 curr_reg_id = 0; bool has_fw_msg; + u8 curr_reg_id; hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; rule_parsing_data = (const struct dbg_idle_chk_rule_parsing_data *) - &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA]. + &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA]. ptr[hdr->rule_id]; parsing_str_offset = GET_FIELD(rule_parsing_data->data, @@ -5565,16 +6047,18 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, has_fw_msg = GET_FIELD(rule_parsing_data->data, DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; - parsing_str = &((const char *) - s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) - [parsing_str_offset]; + parsing_str = + &((const char *) + s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) + [parsing_str_offset]; lsi_msg = parsing_str; + curr_reg_id = 0; if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES) return 0; /* Skip rule header */ - dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4); + dump_buf += BYTES_TO_DWORDS(sizeof(*hdr)); /* Update errors/warnings count */ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR || @@ -5606,19 +6090,19 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, for (i = 0; i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs; i++) { - struct dbg_idle_chk_result_reg_hdr *reg_hdr - = (struct dbg_idle_chk_result_reg_hdr *) - dump_buf; - bool is_mem = - GET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); - u8 reg_id = - GET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + bool is_mem; + u8 reg_id; + + reg_hdr = + (struct dbg_idle_chk_result_reg_hdr *)dump_buf; + is_mem = GET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); + reg_id = GET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); /* Skip reg header */ - dump_buf += - (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4); + dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr)); /* Skip register names until the required reg_id is * reached. 
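Nearly every line in these idle-check hunks goes through GET_FIELD()/SET_FIELD(), the qed convention of pairing an unshifted NAME_MASK with a NAME_SHIFT and letting the preprocessor paste on the suffixes. A self-contained sketch of that convention (the two example fields and their mask values are invented for illustration; the real DBG_IDLE_CHK_RESULT_REG_HDR_* layout comes from the firmware HSI headers, and the driver's SET_FIELD may differ in detail):

#include <stdio.h>
#include <stdint.h>

/* qed-style bit fields: each field defines an unshifted _MASK and a _SHIFT. */
#define HDR_REG_ID_MASK         0x7f
#define HDR_REG_ID_SHIFT        0
#define HDR_IS_MEM_MASK         0x1
#define HDR_IS_MEM_SHIFT        7

#define GET_FIELD(value, name) \
        (((value) >> name##_SHIFT) & name##_MASK)

#define SET_FIELD(value, name, field)                                        \
        do {                                                                 \
                (value) &= ~((uint32_t)name##_MASK << name##_SHIFT);         \
                (value) |= ((uint32_t)(field) & name##_MASK) << name##_SHIFT;\
        } while (0)

int main(void)
{
        uint32_t data = 0;

        SET_FIELD(data, HDR_REG_ID, 5);
        SET_FIELD(data, HDR_IS_MEM, 1);

        /* Prints: data=0x85 reg_id=5 is_mem=1 */
        printf("data=0x%02x reg_id=%u is_mem=%u\n", (unsigned)data,
               (unsigned)GET_FIELD(data, HDR_REG_ID),
               (unsigned)GET_FIELD(data, HDR_IS_MEM));
        return 0;
}

Keeping the mask unshifted is what lets a single macro serve every field: the shift is applied exactly once on read and once on write.
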
@@ -5660,6 +6144,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, /* Check if end of dump buffer was exceeded */ if (dump_buf > dump_buf_end) return 0; + return results_offset; } @@ -5680,13 +6165,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, const char *section_name, *param_name, *param_str_val; u32 *dump_buf_end = dump_buf + num_dumped_dwords; u32 num_section_params = 0, num_rules; - u32 results_offset = 0; /* Offset in results_buf in bytes */ + + /* Offset in results_buf in bytes */ + u32 results_offset = 0; *parsed_results_bytes = 0; *num_errors = 0; *num_warnings = 0; - if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || - !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) + + if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || + !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; /* Read global_params section */ @@ -5705,10 +6193,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, §ion_name, &num_section_params); if (strcmp(section_name, "idle_chk") || num_section_params != 1) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; - dump_buf += qed_read_param(dump_buf, ¶m_name, ¶m_str_val, &num_rules); - if (strcmp(param_name, "num_rules") != 0) + if (strcmp(param_name, "num_rules")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; if (num_rules) { @@ -5728,7 +6215,7 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset : NULL, num_errors, num_warnings); results_offset += rules_print_size; - if (rules_print_size == 0) + if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; /* Print LSI output */ @@ -5745,69 +6232,38 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset : NULL, num_errors, num_warnings); results_offset += rules_print_size; - if (rules_print_size == 0) + if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; } /* Print errors/warnings count */ - if (*num_errors) { + if (*num_errors) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nIdle Check failed!!! 
(with %d errors and %d warnings)\n", *num_errors, *num_warnings); - } else if (*num_warnings) { + else if (*num_warnings) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "\nIdle Check completed successfuly (with %d warnings)\n", + "\nIdle Check completed successfully (with %d warnings)\n", *num_warnings); - } else { + else results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "\nIdle Check completed successfuly\n"); - } + "\nIdle Check completed successfully\n"); /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; + return DBG_STATUS_OK; } -enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - u32 num_errors, num_warnings; - - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, - results_buf_size, - &num_errors, &num_warnings); -} - -enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf, - u32 *num_errors, u32 *num_warnings) -{ - u32 parsed_buf_size; - - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, - &parsed_buf_size, - num_errors, num_warnings); -} - -/* Frees the specified MCP Trace meta data */ -static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, - struct mcp_trace_meta *meta) +/* Frees the specified MCP Trace meta data */ +static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, + struct mcp_trace_meta *meta) { u32 i; @@ -5841,12 +6297,10 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, /* Read first signature */ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; - /* Read number of modules and allocate memory for all the modules - * pointers. - */ + /* Read no. 
of modules and allocate memory for their pointers */ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset); meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL); if (!meta->modules) @@ -5871,7 +6325,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, /* Read second signature */ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); - if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) + if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read number of formats and allocate memory for all formats */ @@ -5919,10 +6373,10 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_mask, param_shift, param_num_val; - u32 num_section_params, offset, end_offset, bytes_left; + u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords; + u32 param_mask, param_shift, param_num_val, num_section_params; const char *section_name, *param_name, *param_str_val; - u32 trace_data_dwords, trace_meta_dwords; + u32 offset, results_offset = 0; struct mcp_trace_meta meta; struct mcp_trace *trace; enum dbg_status status; @@ -5955,7 +6409,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Prepare trace info */ trace = (struct mcp_trace *)dump_buf; - trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace); + trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; end_offset = trace->trace_prod; bytes_left = qed_cyclic_sub(end_offset, offset, trace->size); @@ -5968,7 +6422,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, return DBG_STATUS_MCP_TRACE_BAD_DATA; dump_buf += qed_read_param(dump_buf, ¶m_name, ¶m_str_val, ¶m_num_val); - if (strcmp(param_name, "size") != 0) + if (strcmp(param_name, "size")) return DBG_STATUS_MCP_TRACE_BAD_DATA; trace_meta_dwords = param_num_val; @@ -6028,6 +6482,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, } format_ptr = &meta.formats[format_idx]; + for (i = 0, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT; @@ -6050,6 +6505,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, */ if (param_size == 3) param_size = 4; + if (bytes_left < param_size) { status = DBG_STATUS_MCP_TRACE_BAD_DATA; goto free_mem; @@ -6059,13 +6515,14 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, &offset, trace->size, param_size); + bytes_left -= param_size; } format_level = (u8)((format_ptr->data & MCP_TRACE_FORMAT_LEVEL_MASK) >> - MCP_TRACE_FORMAT_LEVEL_SHIFT); + MCP_TRACE_FORMAT_LEVEL_SHIFT); format_module = (u8)((format_ptr->data & MCP_TRACE_FORMAT_MODULE_MASK) >> @@ -6094,30 +6551,6 @@ free_mem: return status; } -enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} - -enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; - - return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); -} - /* Parses a Reg FIFO dump buffer. * If result_buf is not NULL, the Reg FIFO results are printed to it. 
* In any case, the required results buffer size is assigned to @@ -6130,10 +6563,11 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_num_val, num_section_params, num_elements; const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; struct reg_fifo_element *elements; u8 i, j, err_val, vf_val; + u32 results_offset = 0; char vf_str[4]; /* Read global_params section */ @@ -6179,17 +6613,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", elements[i].data, (u32)GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_ADDRESS) * - REG_FIFO_ELEMENT_ADDR_FACTOR, - s_access_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_ADDRESS) * + REG_FIFO_ELEMENT_ADDR_FACTOR, + s_access_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ACCESS)], (u32)GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_PF), vf_str, + REG_FIFO_ELEMENT_PF), + vf_str, (u32)GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_PORT), - s_privilege_strs[GET_FIELD(elements[i]. - data, - REG_FIFO_ELEMENT_PRIVILEGE)], + REG_FIFO_ELEMENT_PORT), + s_privilege_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PRIVILEGE)], s_protection_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PROTECTION)], s_master_strs[GET_FIELD(elements[i].data, @@ -6201,18 +6635,18 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, REG_FIFO_ELEMENT_ERROR); j < ARRAY_SIZE(s_reg_fifo_error_strs); j++, err_val >>= 1) { - if (!(err_val & 0x1)) - continue; - if (err_printed) + if (err_val & 0x1) { + if (err_printed) + results_offset += + sprintf(qed_get_buf_ptr + (results_buf, + results_offset), ", "); results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), - ", "); - results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), "%s", - s_reg_fifo_error_strs[j]); - err_printed = true; + sprintf(qed_get_buf_ptr + (results_buf, results_offset), "%s", + s_reg_fifo_error_strs[j]); + err_printed = true; + } } results_offset += @@ -6225,31 +6659,140 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; + return DBG_STATUS_OK; } -enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) +static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element + *element, char + *results_buf, + u32 *results_offset, + u32 *parsed_results_bytes) { - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} + const struct igu_fifo_addr_data *found_addr = NULL; + u8 source, err_type, i, is_cleanup; + char parsed_addr_data[32]; + char parsed_wr_data[256]; + u32 wr_data, prod_cons; + bool is_wr_cmd, is_pf; + u16 cmd_addr; + u64 dword12; -enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; + /* Dword12 (dword index 1 and 2) contains bits 32..95 of the + * FIFO element. 
+ */ + dword12 = ((u64)element->dword2 << 32) | element->dword1; + is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); + is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF); + cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); + source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE); + err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); + + if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) + return DBG_STATUS_IGU_FIFO_BAD_DATA; - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + /* Find address data */ + for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) { + const struct igu_fifo_addr_data *curr_addr = + &s_igu_fifo_addr_data[i]; + + if (cmd_addr >= curr_addr->start_addr && cmd_addr <= + curr_addr->end_addr) + found_addr = curr_addr; + } + + if (!found_addr) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + + /* Prepare parsed address data */ + switch (found_addr->type) { + case IGU_ADDR_TYPE_MSIX_MEM: + sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2); + break; + case IGU_ADDR_TYPE_WRITE_INT_ACK: + case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: + sprintf(parsed_addr_data, + " SB = 0x%x", cmd_addr - found_addr->start_addr); + break; + default: + parsed_addr_data[0] = '\0'; + } + + if (!is_wr_cmd) { + parsed_wr_data[0] = '\0'; + goto out; + } + + /* Prepare parsed write data */ + wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA); + prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS); + is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE); + + if (source == IGU_SRC_ATTN) { + sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons); + } else { + if (is_cleanup) { + u8 cleanup_val, cleanup_type; + + cleanup_val = + GET_FIELD(wr_data, + IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); + cleanup_type = + GET_FIELD(wr_data, + IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); + + sprintf(parsed_wr_data, + "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ", + cleanup_val ? "set" : "clear", + cleanup_type); + } else { + u8 update_flag, en_dis_int_for_sb, segment; + u8 timer_mask; + + update_flag = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_UPDATE_FLAG); + en_dis_int_for_sb = + GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); + segment = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_SEGMENT); + timer_mask = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_TIMER_MASK); + + sprintf(parsed_wr_data, + "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ", + prod_cons, + update_flag ? "update" : "nop", + en_dis_int_for_sb + ? (en_dis_int_for_sb == 1 ? "disable" : "nop") + : "enable", + segment ? "attn" : "regular", + timer_mask); + } + } +out: + /* Add parsed element to parsed buffer */ + *results_offset += sprintf(qed_get_buf_ptr(results_buf, + *results_offset), + "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n", + element->dword2, element->dword1, + element->dword0, + is_pf ? "pf" : "vf", + GET_FIELD(element->dword0, + IGU_FIFO_ELEMENT_DWORD0_FID), + s_igu_fifo_source_strs[source], + is_wr_cmd ? "wr" : "rd", + cmd_addr, + (!is_pf && found_addr->vf_desc) + ? found_addr->vf_desc + : found_addr->desc, + parsed_addr_data, + parsed_wr_data, + s_igu_fifo_error_strs[err_type]); + + return DBG_STATUS_OK; } /* Parses an IGU FIFO dump buffer. 
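The element parser factored out above begins by stitching dwords 1 and 2 back into one 64-bit value, so that any field of the 96-bit FIFO element that crosses the 32-bit boundary can be extracted with a single shift-and-mask. A stand-alone sketch of that reassembly (the element contents and the 40-bit example field are fabricated; the real field positions come from the IGU_FIFO_ELEMENT_DWORD12_* definitions):

#include <stdio.h>
#include <stdint.h>

/* A 96-bit IGU-FIFO-style element stored as three little-endian dwords. */
struct fifo_element {
        uint32_t dword0;        /* bits  0..31 */
        uint32_t dword1;        /* bits 32..63 */
        uint32_t dword2;        /* bits 64..95 */
};

int main(void)
{
        struct fifo_element el = { 0x12345678, 0x9abcdef0, 0x00000001 };

        /* Bits 32..95 of the element, exactly as in the driver:
         * dword12 = ((u64)element->dword2 << 32) | element->dword1;
         */
        uint64_t dword12 = ((uint64_t)el.dword2 << 32) | el.dword1;

        /* A hypothetical 40-bit field starting at element bit 32
         * (i.e. bit 0 of dword12) now needs only one mask.
         */
        uint64_t field = dword12 & ((1ULL << 40) - 1);

        printf("dword12=0x%016llx field=0x%010llx\n",
               (unsigned long long)dword12, (unsigned long long)field);
        return 0;
}

The same shift-and-OR works for any record spread across consecutive dwords, which is why the helper materializes dword12 once instead of re-splitting each field per access.
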
@@ -6264,12 +6807,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_num_val, num_section_params, num_elements; const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; struct igu_fifo_element *elements; - char parsed_addr_data[32]; - char parsed_wr_data[256]; - u8 i, j; + enum dbg_status status; + u32 results_offset = 0; + u8 i; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6298,118 +6841,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, /* Decode elements */ for (i = 0; i < num_elements; i++) { - /* dword12 (dword index 1 and 2) contains bits 32..95 of the - * FIFO element. - */ - u64 dword12 = - ((u64)elements[i].dword2 << 32) | elements[i].dword1; - bool is_wr_cmd = GET_FIELD(dword12, - IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); - bool is_pf = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_IS_PF); - u16 cmd_addr = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); - u8 source = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_SOURCE); - u8 err_type = GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); - const struct igu_fifo_addr_data *addr_data = NULL; - - if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) - return DBG_STATUS_IGU_FIFO_BAD_DATA; - if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) - return DBG_STATUS_IGU_FIFO_BAD_DATA; - - /* Find address data */ - for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data; - j++) - if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr && - cmd_addr <= s_igu_fifo_addr_data[j].end_addr) - addr_data = &s_igu_fifo_addr_data[j]; - if (!addr_data) - return DBG_STATUS_IGU_FIFO_BAD_DATA; - - /* Prepare parsed address data */ - switch (addr_data->type) { - case IGU_ADDR_TYPE_MSIX_MEM: - sprintf(parsed_addr_data, - " vector_num=0x%x", cmd_addr / 2); - break; - case IGU_ADDR_TYPE_WRITE_INT_ACK: - case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: - sprintf(parsed_addr_data, - " SB=0x%x", cmd_addr - addr_data->start_addr); - break; - default: - parsed_addr_data[0] = '\0'; - } - - /* Prepare parsed write data */ - if (is_wr_cmd) { - u32 wr_data = GET_FIELD(dword12, - IGU_FIFO_ELEMENT_DWORD12_WR_DATA); - u32 prod_cons = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_PROD_CONS); - u8 is_cleanup = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_CMD_TYPE); - - if (source == IGU_SRC_ATTN) { - sprintf(parsed_wr_data, - "prod: 0x%x, ", prod_cons); - } else { - if (is_cleanup) { - u8 cleanup_val = GET_FIELD(wr_data, - IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); - u8 cleanup_type = GET_FIELD(wr_data, - IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); - - sprintf(parsed_wr_data, - "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ", - cleanup_val ? "set" : "clear", - cleanup_type); - } else { - u8 update_flag = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_UPDATE_FLAG); - u8 en_dis_int_for_sb = - GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); - u8 segment = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_SEGMENT); - u8 timer_mask = GET_FIELD(wr_data, - IGU_FIFO_WR_DATA_TIMER_MASK); - - sprintf(parsed_wr_data, - "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ", - prod_cons, - update_flag ? "update" : "nop", - en_dis_int_for_sb - ? (en_dis_int_for_sb == - 1 ? "disable" : "nop") : - "enable", - segment ? 
"attn" : "regular", - timer_mask); - } - } - } else { - parsed_wr_data[0] = '\0'; - } - - /* Add parsed element to parsed buffer */ - results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), - "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n", - elements[i].dword2, elements[i].dword1, - elements[i].dword0, - is_pf ? "pf" : "vf", - GET_FIELD(elements[i].dword0, - IGU_FIFO_ELEMENT_DWORD0_FID), - s_igu_fifo_source_strs[source], - is_wr_cmd ? "wr" : "rd", cmd_addr, - (!is_pf && addr_data->vf_desc) - ? addr_data->vf_desc : addr_data->desc, - parsed_addr_data, parsed_wr_data, - s_igu_fifo_error_strs[err_type]); + status = qed_parse_igu_fifo_element(&elements[i], + results_buf, + &results_offset, + parsed_results_bytes); + if (status != DBG_STATUS_OK) + return status; } results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6418,31 +6855,8 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; - return DBG_STATUS_OK; -} - -enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} - -enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 parsed_buf_size; - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + return DBG_STATUS_OK; } static enum dbg_status @@ -6452,9 +6866,10 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, param_num_val, num_section_params, num_elements; const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; struct protection_override_element *elements; + u32 results_offset = 0; u8 i; /* Read global_params section */ @@ -6477,7 +6892,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, ¶m_name, ¶m_str_val, ¶m_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; - if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0) + if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS; elements = (struct protection_override_element *)dump_buf; @@ -6486,7 +6901,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, for (i = 0; i < num_elements; i++) { u32 address = GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_ADDRESS) * - PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; + PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6512,33 +6927,8 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; - return DBG_STATUS_OK; -} - -enum dbg_status -qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size) -{ - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); -} -enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) -{ - u32 
parsed_buf_size; - - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, - &parsed_buf_size); + return DBG_STATUS_OK; } /* Parses a FW Asserts dump buffer. @@ -6553,7 +6943,7 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, char *results_buf, u32 *parsed_results_bytes) { - u32 results_offset = 0, num_section_params, param_num_val, i; + u32 num_section_params, param_num_val, i, results_offset = 0; const char *param_name, *param_str_val, *section_name; bool last_section_found = false; @@ -6569,54 +6959,216 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); - while (!last_section_found) { - const char *storm_letter = NULL; - u32 storm_dump_size = 0; + while (!last_section_found) { dump_buf += qed_read_section_hdr(dump_buf, §ion_name, &num_section_params); - if (!strcmp(section_name, "last")) { - last_section_found = true; - continue; - } else if (strcmp(section_name, "fw_asserts")) { - return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; - } + if (!strcmp(section_name, "fw_asserts")) { + /* Extract params */ + const char *storm_letter = NULL; + u32 storm_dump_size = 0; + + for (i = 0; i < num_section_params; i++) { + dump_buf += qed_read_param(dump_buf, + ¶m_name, + ¶m_str_val, + ¶m_num_val); + if (!strcmp(param_name, "storm")) + storm_letter = param_str_val; + else if (!strcmp(param_name, "size")) + storm_dump_size = param_num_val; + else + return + DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + } - /* Extract params */ - for (i = 0; i < num_section_params; i++) { - dump_buf += qed_read_param(dump_buf, - ¶m_name, - ¶m_str_val, - ¶m_num_val); - if (!strcmp(param_name, "storm")) - storm_letter = param_str_val; - else if (!strcmp(param_name, "size")) - storm_dump_size = param_num_val; - else + if (!storm_letter || !storm_dump_size) return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; - } - - if (!storm_letter || !storm_dump_size) - return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; - /* Print data */ - results_offset += sprintf(qed_get_buf_ptr(results_buf, - results_offset), - "\n%sSTORM_ASSERT: size=%d\n", - storm_letter, storm_dump_size); - for (i = 0; i < storm_dump_size; i++, dump_buf++) + /* Print data */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "%08x\n", *dump_buf); + "\n%sSTORM_ASSERT: size=%d\n", + storm_letter, storm_dump_size); + for (i = 0; i < storm_dump_size; i++, dump_buf++) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "%08x\n", *dump_buf); + } else if (!strcmp(section_name, "last")) { + last_section_found = true; + } else { + return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + } } /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + +/***************************** Public Functions *******************************/ + +enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) +{ + struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; + u8 buf_id; + + /* Convert binary data to debug arrays */ + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { + s_user_dbg_arrays[buf_id].ptr = + (u32 *)(bin_ptr + buf_array[buf_id].offset); + s_user_dbg_arrays[buf_id].size_in_dwords = + BYTES_TO_DWORDS(buf_array[buf_id].length); + } + return DBG_STATUS_OK; } +const char *qed_dbg_get_status_str(enum dbg_status status) +{ + return (status < + MAX_DBG_STATUS) ? 
s_status_str[status] : "Invalid debug status"; +} + +enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + u32 num_errors, num_warnings; + + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, + results_buf_size, + &num_errors, &num_warnings); +} + +enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *num_errors, u32 *num_warnings) +{ + u32 parsed_buf_size; + + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, + &parsed_buf_size, + num_errors, num_warnings); +} + +void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) +{ + s_mcp_trace_meta.ptr = data; + s_mcp_trace_meta.size_in_dwords = size; +} + +enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_mcp_trace_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_mcp_trace_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, &parsed_buf_size); +} + +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_reg_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_reg_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, &parsed_buf_size); +} + +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_igu_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_igu_fifo_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, &parsed_buf_size); +} + +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_protection_override_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, results_buf_size); +} + +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_protection_override_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, + &parsed_buf_size); +} + enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index f872d7324814..ea1cc8eaa125 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -20,6 +20,9 @@ enum qed_dbg_features { DBG_FEATURE_NUM }; +/* Forward Declaration */ +struct qed_dev; + int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int 
qed_dbg_grc_size(struct qed_dev *cdev); int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 858a57a73589..eedf79a026a2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -346,7 +346,7 @@ struct xstorm_core_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 byte16; + u8 e5_reserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -368,85 +368,85 @@ struct tstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 
flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -681,7 +681,9 @@ struct core_rx_fast_path_cqe { __le16 packet_length; __le16 vlan; struct core_rx_cqe_opaque_data opaque_data; - __le32 reserved[4]; + struct parsing_err_flags err_flags; + __le16 reserved0; + __le32 reserved1[3]; }; struct core_rx_gsi_offload_cqe { @@ -692,7 +694,7 @@ struct core_rx_gsi_offload_cqe { __le16 vlan; __le32 src_mac_addrhi; __le16 src_mac_addrlo; - u8 reserved1[2]; + __le16 qp_id; __le32 gid_dst[4]; }; @@ -774,15 +776,15 @@ struct core_tx_bd { __le16 bitfield1; #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 -#define CORE_TX_BD_TX_DST_MASK 0x1 -#define CORE_TX_BD_TX_DST_SHIFT 14 -#define CORE_TX_BD_RESERVED_MASK 0x1 -#define CORE_TX_BD_RESERVED_SHIFT 15 +#define CORE_TX_BD_TX_DST_MASK 0x3 +#define CORE_TX_BD_TX_DST_SHIFT 14 }; enum core_tx_dest { CORE_TX_DEST_NW, CORE_TX_DEST_LB, + CORE_TX_DEST_RESERVED, + CORE_TX_DEST_DROP, MAX_CORE_TX_DEST }; @@ -804,12 +806,12 @@ struct core_tx_stop_ramrod_data { __le32 reserved0[2]; }; -enum dcb_dhcp_update_flag { - DONT_UPDATE_DCB_DHCP, +enum dcb_dscp_update_mode { + DONT_UPDATE_DCB_DSCP, UPDATE_DCB, UPDATE_DSCP, UPDATE_DCB_DSCP, - MAX_DCB_DHCP_UPDATE_FLAG + MAX_DCB_DSCP_UPDATE_MODE }; struct eth_mstorm_per_pf_stat { @@ -917,6 +919,14 @@ struct hsi_fp_ver_struct { u8 major_ver_arr[2]; }; +enum iwarp_ll2_tx_queues { + IWARP_LL2_IN_ORDER_TX_QUEUE = 1, + IWARP_LL2_ALIGNED_TX_QUEUE, + IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE, + IWARP_LL2_ERROR, + MAX_IWARP_LL2_TX_QUEUES +}; + /* Mstorm non-triggering VF zone */ enum malicious_vf_error_id { MALICIOUS_VF_NO_ERROR, @@ -960,7 +970,7 @@ enum personality_type { PERSONALITY_ISCSI, PERSONALITY_FCOE, PERSONALITY_RDMA_AND_ETH, - PERSONALITY_RESERVED3, + PERSONALITY_RDMA, PERSONALITY_CORE, PERSONALITY_ETH, PERSONALITY_RESERVED4, @@ -971,16 +981,12 @@ enum personality_type { struct pf_start_tunnel_config { u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre; - u8 tx_enable_ipgre; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; + u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; }; @@ -990,6 +996,7 @@ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; struct regpair consolid_q_pbl_addr; struct pf_start_tunnel_config tunnel_config; + __le32 reserved; __le16 event_ring_sb_id; u8 base_vf_id; u8 num_vfs; @@ -1007,7 +1014,6 @@ struct 
pf_start_ramrod_data { u8 pri_map_valid; __le32 outer_tag; struct hsi_fp_ver_struct hsi_fp_ver; - }; struct protocol_dcb_data { @@ -1023,14 +1029,8 @@ struct pf_update_tunnel_config { u8 update_rx_pf_clss; u8 update_rx_def_ucast_clss; u8 update_rx_def_non_ucast_clss; - u8 update_tx_pf_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre; - u8 tx_enable_ipgre; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; @@ -1038,17 +1038,17 @@ struct pf_update_tunnel_config { u8 tunnel_clss_ipgre; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved[2]; + __le16 reserved; }; struct pf_update_ramrod_data { u8 pf_id; - u8 update_eth_dcb_data_flag; - u8 update_fcoe_dcb_data_flag; - u8 update_iscsi_dcb_data_flag; - u8 update_roce_dcb_data_flag; - u8 update_rroce_dcb_data_flag; - u8 update_iwarp_dcb_data_flag; + u8 update_eth_dcb_data_mode; + u8 update_fcoe_dcb_data_mode; + u8 update_iscsi_dcb_data_mode; + u8 update_roce_dcb_data_mode; + u8 update_rroce_dcb_data_mode; + u8 update_iwarp_dcb_data_mode; u8 update_mf_vlan_flag; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; @@ -1127,7 +1127,7 @@ struct tstorm_per_port_stat { struct regpair iscsi_irregular_pkt; struct regpair fcoe_irregular_pkt; struct regpair roce_irregular_pkt; - struct regpair reserved; + struct regpair iwarp_irregular_pkt; struct regpair eth_irregular_pkt; struct regpair reserved1; struct regpair preroce_irregular_pkt; @@ -1326,6 +1326,87 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; +struct mstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + 
u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + /* IGU cleanup command */ struct igu_cleanup { __le32 sb_id_and_flags; @@ -1389,44 +1470,6 @@ struct igu_msix_vector { #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 }; - -struct mstorm_core_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0; - __le16 word1; - __le32 reg0; - __le32 reg1; -}; - /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -1541,50 +1584,6 @@ struct sdm_op_gen { #define SDM_OP_GEN_RESERVED_SHIFT 20 }; -struct ystorm_core_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define 
YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2; - u8 byte3; - __le16 word0; - __le32 reg0; - __le32 reg1; - __le16 word1; - __le16 word2; - __le16 word3; - __le16 word4; - __le32 reg2; - __le32 reg3; -}; - /****************************************/ /* Debug Tools HSI constants and macros */ /****************************************/ @@ -1643,6 +1642,8 @@ enum block_addr { GRCBASE_MULD = 0x4e0000, GRCBASE_YULD = 0x4c8000, GRCBASE_XYLD = 0x4c0000, + GRCBASE_PTLD = 0x590000, + GRCBASE_YPLD = 0x5b0000, GRCBASE_PRM = 0x230000, GRCBASE_PBF_PB1 = 0xda0000, GRCBASE_PBF_PB2 = 0xda4000, @@ -1656,6 +1657,10 @@ enum block_addr { GRCBASE_TCFC = 0x2d0000, GRCBASE_IGU = 0x180000, GRCBASE_CAU = 0x1c0000, + GRCBASE_RGFS = 0xf00000, + GRCBASE_RGSRC = 0x320000, + GRCBASE_TGFS = 0xd00000, + GRCBASE_TGSRC = 0x322000, GRCBASE_UMAC = 0x51000, GRCBASE_XMAC = 0x210000, GRCBASE_DBG = 0x10000, @@ -1669,10 +1674,6 @@ enum block_addr { GRCBASE_PHY_PCIE = 0x620000, GRCBASE_LED = 0x6b8000, GRCBASE_AVS_WRAP = 0x6b0000, - GRCBASE_RGFS = 0x19d0000, - GRCBASE_TGFS = 0x19e0000, - GRCBASE_PTLD = 0x19f0000, - GRCBASE_YPLD = 0x1a10000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -1732,6 +1733,8 @@ enum block_id { BLOCK_MULD, BLOCK_YULD, BLOCK_XYLD, + BLOCK_PTLD, + BLOCK_YPLD, BLOCK_PRM, BLOCK_PBF_PB1, BLOCK_PBF_PB2, @@ -1745,6 +1748,10 @@ enum block_id { BLOCK_TCFC, BLOCK_IGU, BLOCK_CAU, + BLOCK_RGFS, + BLOCK_RGSRC, + BLOCK_TGFS, + BLOCK_TGSRC, BLOCK_UMAC, BLOCK_XMAC, BLOCK_DBG, @@ -1758,10 +1765,6 @@ enum block_id { BLOCK_PHY_PCIE, BLOCK_LED, BLOCK_AVS_WRAP, - BLOCK_RGFS, - BLOCK_TGFS, - BLOCK_PTLD, - BLOCK_YPLD, BLOCK_MISC_AEU, BLOCK_BAR0_MAP, MAX_BLOCK_ID @@ -1780,6 +1783,10 @@ enum bin_dbg_buffer_type { BIN_BUF_DBG_ATTN_REGS, BIN_BUF_DBG_ATTN_INDEXES, BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_BUS_BLOCKS, + BIN_BUF_DBG_BUS_LINES, + BIN_BUF_DBG_BUS_BLOCKS_USER_DATA, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, BIN_BUF_DBG_PARSING_STRINGS, MAX_BIN_DBG_BUFFER_TYPE }; @@ -1862,6 +1869,29 @@ enum dbg_attn_type { MAX_DBG_ATTN_TYPE }; +struct dbg_bus_block { + u8 num_of_lines; + u8 has_latency_events; + __le16 lines_offset; +}; + +struct dbg_bus_block_user_data { + u8 num_of_lines; + u8 has_latency_events; + __le16 names_offset; +}; + +struct dbg_bus_line { + u8 data; +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 + u8 group_sizes; +}; + /* condition header for registers dump */ struct dbg_dump_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ @@ -1879,17 +1909,21 @@ struct dbg_dump_mem { __le32 dword1; #define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF #define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define DBG_DUMP_MEM_RESERVED_MASK 0xFF -#define DBG_DUMP_MEM_RESERVED_SHIFT 24 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 }; 
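
Note that the packed dbg_bus_blocks[] table added near the end of this patch lines up with the struct dbg_bus_block introduced above: reading each u32 entry little-endian gives num_of_lines in bits 0..7, has_latency_events in bits 8..15 and lines_offset in bits 16..31 (for example, 0x000f0127 matches its "pglue_b, bb, 39 lines" comment: 0x27 = 39 lines, latency events set, lines_offset 15). A minimal decoder under that assumed packing:

#include <stdint.h>
#include <stdio.h>

/* Decoded view of one dbg_bus_blocks[] entry, assuming the u32 packs the
 * struct dbg_bus_block fields little-endian (an inference from the table
 * comments, not a documented format).
 */
struct demo_bus_block {
	uint8_t num_of_lines;
	uint8_t has_latency_events;
	uint16_t lines_offset;
};

static struct demo_bus_block demo_decode(uint32_t entry)
{
	struct demo_bus_block b;

	b.num_of_lines = entry & 0xff;
	b.has_latency_events = (entry >> 8) & 0xff;
	b.lines_offset = (entry >> 16) & 0xffff;
	return b;
}

int main(void)
{
	/* the "pglue_b, bb, 39 lines" entry from the table */
	struct demo_bus_block b = demo_decode(0x000f0127);

	printf("lines=%u latency_events=%u lines_offset=%u\n",
	       b.num_of_lines, b.has_latency_events, b.lines_offset);
	return 0;
}
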
/* register data for registers dump */ struct dbg_dump_reg { __le32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */ +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ -#define DBG_DUMP_REG_LENGTH_SHIFT 24 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */ +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ +#define DBG_DUMP_REG_LENGTH_SHIFT 24 }; /* split header for registers dump */ @@ -1910,20 +1944,24 @@ struct dbg_idle_chk_cond_hdr { /* Idle Check condition register */ struct dbg_idle_chk_cond_reg { __le32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - __le16 num_entries; /* number of registers entries to check */ - u8 entry_size; /* size of registers entry (in dwords) */ - u8 start_entry; /* index of the first entry to check */ + __le16 num_entries; + u8 entry_size; + u8 start_entry; }; /* Idle Check info register */ struct dbg_idle_chk_info_reg { __le32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 __le16 size; /* register size in dwords */ @@ -1996,15 +2034,17 @@ enum dbg_idle_chk_severity_types { /* Debug Bus block data */ struct dbg_bus_block_data { - u8 enabled; /* Indicates if the block is enabled for recording (0/1) */ - u8 hw_id; /* HW ID associated with the block */ - u8 line_num; /* Debug line number to select */ - u8 right_shift; /* Number of units to right the debug data (0-3) */ - u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */ - u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */ - u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced. 
- */ - u8 reserved; + __le16 data; +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 + u8 line_num; + u8 hw_id; }; /* Debug Bus Clients */ @@ -2045,6 +2085,14 @@ enum dbg_bus_constraint_ops { MAX_DBG_BUS_CONSTRAINT_OPS }; +struct dbg_bus_trigger_state_data { + u8 data; +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 +}; + /* Debug Bus memory address */ struct dbg_bus_mem_addr { __le32 lo; @@ -2078,66 +2126,42 @@ union dbg_bus_storm_eid_params { /* Debug Bus Storm data */ struct dbg_bus_storm_data { - u8 fast_enabled; - u8 fast_mode; - u8 slow_enabled; - u8 slow_mode; + u8 enabled; + u8 mode; u8 hw_id; u8 eid_filter_en; u8 eid_range_not_mask; u8 cid_filter_en; union dbg_bus_storm_eid_params eid_filter_params; - __le16 reserved; __le32 cid; }; /* Debug Bus data */ struct dbg_bus_data { - __le32 app_version; /* The tools version number of the application */ - u8 state; /* The current debug bus state */ - u8 hw_dwords; /* HW dwords per cycle */ - u8 next_hw_id; /* Next HW ID to be associated with an input */ - u8 num_enabled_blocks; /* Number of blocks enabled for recording */ - u8 num_enabled_storms; /* Number of Storms enabled for recording */ - u8 target; /* Output target */ - u8 next_trigger_state; /* ID of next trigger state to be added */ - u8 next_constraint_id; /* ID of next filter/trigger constraint to be - * added. - */ - u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */ - u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */ - u8 timestamp_input_en; /* Indicates if timestamp recording is enabled - * (0/1). - */ - u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */ - u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */ - u8 adding_filter; /* If true, the next added constraint belong to the - * filter. Otherwise, it belongs to the last added - * trigger state. Valid only if either filter or - * triggers are enabled. - */ - u8 filter_pre_trigger; /* Indicates if the recording filter should be - * applied before the trigger. Valid only if both - * filter and trigger are enabled (0/1). - */ - u8 filter_post_trigger; /* Indicates if the recording filter should be - * applied after the trigger. Valid only if both - * filter and trigger are enabled (0/1). - */ - u8 unify_inputs; /* If true, all inputs are associated with HW ID 0. - * Otherwise, each input is assigned a different HW ID - * (0/1). - */ - u8 rcv_from_other_engine; /* Indicates if the other engine sends it NW - * recording to this engine (0/1). - */ - struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid - * only when the target is - * DBG_BUS_TARGET_ID_PCI. 
- */ + __le32 app_version; + u8 state; + u8 hw_dwords; + __le16 hw_id_mask; + u8 num_enabled_blocks; + u8 num_enabled_storms; + u8 target; + u8 one_shot_en; + u8 grc_input_en; + u8 timestamp_input_en; + u8 filter_en; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; __le16 reserved; - struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */ - struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */ + u8 trigger_en; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 next_trigger_state; + u8 next_constraint_id; + u8 unify_inputs; + u8 rcv_from_other_engine; + struct dbg_bus_pci_buf_data pci_buf; + struct dbg_bus_block_data blocks[88]; + struct dbg_bus_storm_data storms[6]; }; enum dbg_bus_filter_types { @@ -2156,12 +2180,6 @@ enum dbg_bus_frame_modes { MAX_DBG_BUS_FRAME_MODES }; -enum dbg_bus_input_types { - DBG_BUS_INPUT_TYPE_STORM, - DBG_BUS_INPUT_TYPE_BLOCK, - MAX_DBG_BUS_INPUT_TYPES -}; - enum dbg_bus_other_engine_modes { DBG_BUS_OTHER_ENGINE_MODE_NONE, DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, @@ -2185,19 +2203,19 @@ enum dbg_bus_pre_trigger_types { }; enum dbg_bus_semi_frame_modes { - DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0, - DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3, + DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = + 0, + DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = + 3, MAX_DBG_BUS_SEMI_FRAME_MODES }; /* Debug bus states */ enum dbg_bus_states { - DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */ - DBG_BUS_STATE_READY, /* debug bus is ready for configuration and - * recording. - */ - DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */ - DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */ + DBG_BUS_STATE_IDLE, + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING, + DBG_BUS_STATE_STOPPED, MAX_DBG_BUS_STATES }; @@ -2216,11 +2234,8 @@ enum dbg_bus_storm_modes { /* Debug bus target IDs */ enum dbg_bus_targets { - /* records debug bus to DBG block internal buffer */ DBG_BUS_TARGET_ID_INT_BUF, - /* records debug bus to the NW */ DBG_BUS_TARGET_ID_NIG, - /* records debug bus to a PCI buffer */ DBG_BUS_TARGET_ID_PCI, MAX_DBG_BUS_TARGETS }; @@ -2235,48 +2250,45 @@ struct dbg_grc_data { /* Debug GRC params */ enum dbg_grc_params { - DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */ - DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */ - DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */ - DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */ - DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */ - DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */ - DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */ - DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */ - DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */ - DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */ - DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */ - DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */ - DBG_GRC_PARAM_RESERVED, /* reserved */ - DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */ - DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */ - DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */ - DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */ - DBG_GRC_PARAM_DUMP_BMB, /* dump BMB 
memories (0/1) */ - DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */ - DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */ - DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */ - DBG_GRC_PARAM_DUMP_DMAE, /* dump PRS memories (0/1) */ - DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */ - DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */ - DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */ - DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */ - DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */ - DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */ - DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */ - /* preset: exclude all memories from dump (1 only) */ + DBG_GRC_PARAM_DUMP_TSTORM, + DBG_GRC_PARAM_DUMP_MSTORM, + DBG_GRC_PARAM_DUMP_USTORM, + DBG_GRC_PARAM_DUMP_XSTORM, + DBG_GRC_PARAM_DUMP_YSTORM, + DBG_GRC_PARAM_DUMP_PSTORM, + DBG_GRC_PARAM_DUMP_REGS, + DBG_GRC_PARAM_DUMP_RAM, + DBG_GRC_PARAM_DUMP_PBUF, + DBG_GRC_PARAM_DUMP_IOR, + DBG_GRC_PARAM_DUMP_VFC, + DBG_GRC_PARAM_DUMP_CM_CTX, + DBG_GRC_PARAM_DUMP_PXP, + DBG_GRC_PARAM_DUMP_RSS, + DBG_GRC_PARAM_DUMP_CAU, + DBG_GRC_PARAM_DUMP_QM, + DBG_GRC_PARAM_DUMP_MCP, + DBG_GRC_PARAM_RESERVED, + DBG_GRC_PARAM_DUMP_CFC, + DBG_GRC_PARAM_DUMP_IGU, + DBG_GRC_PARAM_DUMP_BRB, + DBG_GRC_PARAM_DUMP_BTB, + DBG_GRC_PARAM_DUMP_BMB, + DBG_GRC_PARAM_DUMP_NIG, + DBG_GRC_PARAM_DUMP_MULD, + DBG_GRC_PARAM_DUMP_PRS, + DBG_GRC_PARAM_DUMP_DMAE, + DBG_GRC_PARAM_DUMP_TM, + DBG_GRC_PARAM_DUMP_SDM, + DBG_GRC_PARAM_DUMP_DIF, + DBG_GRC_PARAM_DUMP_STATIC, + DBG_GRC_PARAM_UNSTALL, + DBG_GRC_PARAM_NUM_LCIDS, + DBG_GRC_PARAM_NUM_LTIDS, DBG_GRC_PARAM_EXCLUDE_ALL, - /* preset: include memories for crash dump (1 only) */ DBG_GRC_PARAM_CRASH, - /* perform dump only if MFW is responding (0/1) */ DBG_GRC_PARAM_PARITY_SAFE, - DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */ - DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */ + DBG_GRC_PARAM_DUMP_CM, + DBG_GRC_PARAM_DUMP_PHY, DBG_GRC_PARAM_NO_MCP, DBG_GRC_PARAM_NO_FW_VER, MAX_DBG_GRC_PARAMS @@ -2347,7 +2359,10 @@ enum dbg_status { DBG_STATUS_REG_FIFO_BAD_DATA, DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, DBG_STATUS_DBG_ARRAY_NOT_SET, - DBG_STATUS_MULTI_BLOCKS_WITH_FILTER, + DBG_STATUS_FILTER_BUG, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET, + DBG_STATUS_DBG_BUS_IN_USE, MAX_DBG_STATUS }; @@ -2364,25 +2379,22 @@ enum dbg_storms { /* Idle Check data */ struct idle_chk_data { - __le32 buf_size; /* Idle check buffer size in dwords */ - u8 buf_size_set; /* Indicates if the idle check buffer size was set - * (0/1). - */ + __le32 buf_size; + u8 buf_size_set; u8 reserved1; __le16 reserved2; }; /* Debug Tools data (per HW function) */ struct dbg_tools_data { - struct dbg_grc_data grc; /* GRC Dump data */ - struct dbg_bus_data bus; /* Debug Bus data */ - struct idle_chk_data idle_chk; /* Idle Check data */ - u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */ - u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1). 
- */ - u8 chip_id; /* Chip ID (from enum chip_ids) */ - u8 platform_id; /* Platform ID (from enum platform_ids) */ - u8 initialized; /* Indicates if the data was initialized */ + struct dbg_grc_data grc; + struct dbg_bus_data bus; + struct idle_chk_data idle_chk; + u8 mode_enable[40]; + u8 block_in_reset[88]; + u8 chip_id; + u8 platform_id; + u8 initialized; u8 reserved; }; @@ -2464,6 +2476,12 @@ struct init_qm_vport_params { /* Max size in dwords of a zipped array */ #define MAX_ZIPPED_SIZE 8192 +enum chip_ids { + CHIP_BB, + CHIP_K2, + CHIP_RESERVED, + MAX_CHIP_IDS +}; struct fw_asserts_ram_section { __le16 section_ram_line_offset; @@ -2475,18 +2493,18 @@ struct fw_asserts_ram_section { }; struct fw_ver_num { - u8 major; /* Firmware major version number */ - u8 minor; /* Firmware minor version number */ - u8 rev; /* Firmware revision version number */ - u8 eng; /* Firmware engineering version number (for bootleg versions) */ + u8 major; + u8 minor; + u8 rev; + u8 eng; }; struct fw_ver_info { - __le16 tools_ver; /* Tools version number */ - u8 image_id; /* FW image ID (e.g. main) */ + __le16 tools_ver; + u8 image_id; u8 reserved1; - struct fw_ver_num num; /* FW version number */ - __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */ + struct fw_ver_num num; + __le32 timestamp; __le32 reserved2; }; @@ -2722,7 +2740,6 @@ struct init_read_op { #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 __le32 expected_val; - }; /* Init operations union */ @@ -2782,6 +2799,7 @@ struct iro { * @param bin_ptr - a pointer to the binary data with debug arrays. */ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); + /** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. @@ -2805,6 +2823,7 @@ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. * @@ -2824,6 +2843,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size * for idle check results. @@ -2840,6 +2860,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results * into the specified buffer. @@ -2860,6 +2881,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size * for mcp trace results. @@ -2878,6 +2900,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results * into the specified buffer. @@ -2902,6 +2925,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size * for grc trace fifo results. 
@@ -2917,6 +2941,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into * the specified buffer. @@ -2938,6 +2963,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size * for the IGU fifo results. @@ -2954,6 +2980,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); + /** * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into * the specified buffer. @@ -2975,6 +3002,7 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords); + /** * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required * buffer size for protection override window results. @@ -3074,6 +3102,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, * @param bin_ptr - a pointer to the binary data with debug arrays. */ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); + /** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * @@ -3082,6 +3111,7 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); * @return a string for the specified status */ const char *qed_dbg_get_status_str(enum dbg_status status); + /** * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size * for idle check results (in bytes). @@ -3116,6 +3146,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, char *results_buf, u32 *num_errors, u32 *num_warnings); + /** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size * for MCP Trace results (in bytes). @@ -3132,6 +3163,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_mcp_trace_results - Prints MCP Trace results * @@ -3146,6 +3178,7 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). @@ -3162,6 +3195,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_reg_fifo_results - Prints reg fifo results * @@ -3176,6 +3210,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size * for igu_fifo results (in bytes). 
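
All the qed_get_*_results_buf_size() / qed_print_*_results() pairs declared in this header follow the two-pass convention visible in their definitions earlier in the patch: the shared parser is first called with a NULL results buffer so it only computes *parsed_results_bytes, the caller allocates that many bytes, and the parser then runs again into the real buffer (qed_get_buf_ptr() appears to redirect writes to a scratch sink while measuring). A minimal standalone sketch of the convention, with illustrative names rather than the driver API:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int u32;

/* Two-pass parser: with a NULL output buffer it only measures; with a
 * real buffer it formats for real. sprintf's return value drives the
 * running offset in both passes, as in qed_parse_*_dump().
 */
static int demo_parse(const u32 *dump, u32 n, char *out, u32 *bytes)
{
	char scratch[16];	/* stand-in for the driver's scratch sink */
	u32 off = 0, i;

	for (i = 0; i < n; i++)
		off += sprintf(out ? out + off : scratch, "%08x\n", dump[i]);

	*bytes = off + 1;	/* add 1 for string NUL termination */
	return 0;
}

int main(void)
{
	u32 dump[] = { 0xdeadbeef, 0xcafe };
	u32 size;
	char *buf;

	demo_parse(dump, 2, NULL, &size);	/* pass 1: size only */
	buf = malloc(size);
	if (!buf)
		return 1;
	demo_parse(dump, 2, buf, &size);	/* pass 2: real output */
	fputs(buf, stdout);
	free(buf);
	return 0;
}
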
@@ -3192,6 +3227,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_igu_fifo_results - Prints IGU fifo results * @@ -3206,6 +3242,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_protection_override_results_buf_size - Returns the required * buffer size for protection override results (in bytes). @@ -3223,6 +3260,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_protection_override_results - Prints protection override * results. @@ -3238,6 +3276,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + /** * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size * for FW Asserts results (in bytes). @@ -3254,6 +3293,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + /** * @brief qed_print_fw_asserts_results - Prints FW Asserts results * @@ -3268,6 +3308,269 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); + +/* Debug Bus blocks */ +static const u32 dbg_bus_blocks[] = { + 0x0000000f, /* grc, bb, 15 lines */ + 0x0000000f, /* grc, k2, 15 lines */ + 0x00000000, + 0x00000000, /* miscs, bb, 0 lines */ + 0x00000000, /* miscs, k2, 0 lines */ + 0x00000000, + 0x00000000, /* misc, bb, 0 lines */ + 0x00000000, /* misc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* dbu, bb, 0 lines */ + 0x00000000, /* dbu, k2, 0 lines */ + 0x00000000, + 0x000f0127, /* pglue_b, bb, 39 lines */ + 0x0036012a, /* pglue_b, k2, 42 lines */ + 0x00000000, + 0x00000000, /* cnig, bb, 0 lines */ + 0x00120102, /* cnig, k2, 2 lines */ + 0x00000000, + 0x00000000, /* cpmu, bb, 0 lines */ + 0x00000000, /* cpmu, k2, 0 lines */ + 0x00000000, + 0x00000001, /* ncsi, bb, 1 lines */ + 0x00000001, /* ncsi, k2, 1 lines */ + 0x00000000, + 0x00000000, /* opte, bb, 0 lines */ + 0x00000000, /* opte, k2, 0 lines */ + 0x00000000, + 0x00600085, /* bmb, bb, 133 lines */ + 0x00600085, /* bmb, k2, 133 lines */ + 0x00000000, + 0x00000000, /* pcie, bb, 0 lines */ + 0x00e50033, /* pcie, k2, 51 lines */ + 0x00000000, + 0x00000000, /* mcp, bb, 0 lines */ + 0x00000000, /* mcp, k2, 0 lines */ + 0x00000000, + 0x01180009, /* mcp2, bb, 9 lines */ + 0x01180009, /* mcp2, k2, 9 lines */ + 0x00000000, + 0x01210104, /* pswhst, bb, 4 lines */ + 0x01210104, /* pswhst, k2, 4 lines */ + 0x00000000, + 0x01250103, /* pswhst2, bb, 3 lines */ + 0x01250103, /* pswhst2, k2, 3 lines */ + 0x00000000, + 0x00340101, /* pswrd, bb, 1 lines */ + 0x00340101, /* pswrd, k2, 1 lines */ + 0x00000000, + 0x01280119, /* pswrd2, bb, 25 lines */ + 0x01280119, /* pswrd2, k2, 25 lines */ + 0x00000000, + 0x01410109, /* pswwr, bb, 9 lines */ + 0x01410109, /* pswwr, k2, 9 lines */ + 0x00000000, + 0x00000000, /* pswwr2, bb, 0 lines */ + 0x00000000, /* pswwr2, k2, 0 lines */ + 0x00000000, + 0x001c0001, /* pswrq, bb, 1 lines */ + 0x001c0001, /* pswrq, k2, 1 lines */ + 0x00000000, + 0x014a0015, /* pswrq2, bb, 21 lines */ + 0x014a0015, /* pswrq2, k2, 21 lines */ + 0x00000000, + 0x00000000, /* pglcs, bb, 0 lines */ + 0x00120006, /* pglcs, k2, 6 lines */ + 0x00000000, + 0x00100001, /* dmae, bb, 1 
lines */ + 0x00100001, /* dmae, k2, 1 lines */ + 0x00000000, + 0x015f0105, /* ptu, bb, 5 lines */ + 0x015f0105, /* ptu, k2, 5 lines */ + 0x00000000, + 0x01640120, /* tcm, bb, 32 lines */ + 0x01640120, /* tcm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* mcm, bb, 32 lines */ + 0x01640120, /* mcm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* ucm, bb, 32 lines */ + 0x01640120, /* ucm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* xcm, bb, 32 lines */ + 0x01640120, /* xcm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* ycm, bb, 32 lines */ + 0x01640120, /* ycm, k2, 32 lines */ + 0x00000000, + 0x01640120, /* pcm, bb, 32 lines */ + 0x01640120, /* pcm, k2, 32 lines */ + 0x00000000, + 0x01840062, /* qm, bb, 98 lines */ + 0x01840062, /* qm, k2, 98 lines */ + 0x00000000, + 0x01e60021, /* tm, bb, 33 lines */ + 0x01e60021, /* tm, k2, 33 lines */ + 0x00000000, + 0x02070107, /* dorq, bb, 7 lines */ + 0x02070107, /* dorq, k2, 7 lines */ + 0x00000000, + 0x00600185, /* brb, bb, 133 lines */ + 0x00600185, /* brb, k2, 133 lines */ + 0x00000000, + 0x020e0019, /* src, bb, 25 lines */ + 0x020c001a, /* src, k2, 26 lines */ + 0x00000000, + 0x02270104, /* prs, bb, 4 lines */ + 0x02270104, /* prs, k2, 4 lines */ + 0x00000000, + 0x022b0133, /* tsdm, bb, 51 lines */ + 0x022b0133, /* tsdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* msdm, bb, 51 lines */ + 0x022b0133, /* msdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* usdm, bb, 51 lines */ + 0x022b0133, /* usdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* xsdm, bb, 51 lines */ + 0x022b0133, /* xsdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* ysdm, bb, 51 lines */ + 0x022b0133, /* ysdm, k2, 51 lines */ + 0x00000000, + 0x022b0133, /* psdm, bb, 51 lines */ + 0x022b0133, /* psdm, k2, 51 lines */ + 0x00000000, + 0x025e010c, /* tsem, bb, 12 lines */ + 0x025e010c, /* tsem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* msem, bb, 12 lines */ + 0x025e010c, /* msem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* usem, bb, 12 lines */ + 0x025e010c, /* usem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* xsem, bb, 12 lines */ + 0x025e010c, /* xsem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* ysem, bb, 12 lines */ + 0x025e010c, /* ysem, k2, 12 lines */ + 0x00000000, + 0x025e010c, /* psem, bb, 12 lines */ + 0x025e010c, /* psem, k2, 12 lines */ + 0x00000000, + 0x026a000d, /* rss, bb, 13 lines */ + 0x026a000d, /* rss, k2, 13 lines */ + 0x00000000, + 0x02770106, /* tmld, bb, 6 lines */ + 0x02770106, /* tmld, k2, 6 lines */ + 0x00000000, + 0x027d0106, /* muld, bb, 6 lines */ + 0x027d0106, /* muld, k2, 6 lines */ + 0x00000000, + 0x02770005, /* yuld, bb, 5 lines */ + 0x02770005, /* yuld, k2, 5 lines */ + 0x00000000, + 0x02830107, /* xyld, bb, 7 lines */ + 0x027d0107, /* xyld, k2, 7 lines */ + 0x00000000, + 0x00000000, /* ptld, bb, 0 lines */ + 0x00000000, /* ptld, k2, 0 lines */ + 0x00000000, + 0x00000000, /* ypld, bb, 0 lines */ + 0x00000000, /* ypld, k2, 0 lines */ + 0x00000000, + 0x028a010e, /* prm, bb, 14 lines */ + 0x02980110, /* prm, k2, 16 lines */ + 0x00000000, + 0x02a8000d, /* pbf_pb1, bb, 13 lines */ + 0x02a8000d, /* pbf_pb1, k2, 13 lines */ + 0x00000000, + 0x02a8000d, /* pbf_pb2, bb, 13 lines */ + 0x02a8000d, /* pbf_pb2, k2, 13 lines */ + 0x00000000, + 0x02a8000d, /* rpb, bb, 13 lines */ + 0x02a8000d, /* rpb, k2, 13 lines */ + 0x00000000, + 0x00600185, /* btb, bb, 133 lines */ + 0x00600185, /* btb, k2, 133 lines */ + 0x00000000, + 0x02b50117, /* pbf, bb, 23 lines */ + 0x02b50117, /* pbf, k2, 23 lines */ + 0x00000000, + 0x02cc0006, /* rdif, bb, 6 lines */ 
+ 0x02cc0006, /* rdif, k2, 6 lines */ + 0x00000000, + 0x02d20006, /* tdif, bb, 6 lines */ + 0x02d20006, /* tdif, k2, 6 lines */ + 0x00000000, + 0x02d80003, /* cdu, bb, 3 lines */ + 0x02db000e, /* cdu, k2, 14 lines */ + 0x00000000, + 0x02e9010d, /* ccfc, bb, 13 lines */ + 0x02f60117, /* ccfc, k2, 23 lines */ + 0x00000000, + 0x02e9010d, /* tcfc, bb, 13 lines */ + 0x02f60117, /* tcfc, k2, 23 lines */ + 0x00000000, + 0x030d0133, /* igu, bb, 51 lines */ + 0x030d0133, /* igu, k2, 51 lines */ + 0x00000000, + 0x03400106, /* cau, bb, 6 lines */ + 0x03400106, /* cau, k2, 6 lines */ + 0x00000000, + 0x00000000, /* rgfs, bb, 0 lines */ + 0x00000000, /* rgfs, k2, 0 lines */ + 0x00000000, + 0x00000000, /* rgsrc, bb, 0 lines */ + 0x00000000, /* rgsrc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* tgfs, bb, 0 lines */ + 0x00000000, /* tgfs, k2, 0 lines */ + 0x00000000, + 0x00000000, /* tgsrc, bb, 0 lines */ + 0x00000000, /* tgsrc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* umac, bb, 0 lines */ + 0x00120006, /* umac, k2, 6 lines */ + 0x00000000, + 0x00000000, /* xmac, bb, 0 lines */ + 0x00000000, /* xmac, k2, 0 lines */ + 0x00000000, + 0x00000000, /* dbg, bb, 0 lines */ + 0x00000000, /* dbg, k2, 0 lines */ + 0x00000000, + 0x0346012b, /* nig, bb, 43 lines */ + 0x0346011d, /* nig, k2, 29 lines */ + 0x00000000, + 0x00000000, /* wol, bb, 0 lines */ + 0x001c0002, /* wol, k2, 2 lines */ + 0x00000000, + 0x00000000, /* bmbn, bb, 0 lines */ + 0x00210008, /* bmbn, k2, 8 lines */ + 0x00000000, + 0x00000000, /* ipc, bb, 0 lines */ + 0x00000000, /* ipc, k2, 0 lines */ + 0x00000000, + 0x00000000, /* nwm, bb, 0 lines */ + 0x0371000b, /* nwm, k2, 11 lines */ + 0x00000000, + 0x00000000, /* nws, bb, 0 lines */ + 0x037c0009, /* nws, k2, 9 lines */ + 0x00000000, + 0x00000000, /* ms, bb, 0 lines */ + 0x00120004, /* ms, k2, 4 lines */ + 0x00000000, + 0x00000000, /* phy_pcie, bb, 0 lines */ + 0x00e5001a, /* phy_pcie, k2, 26 lines */ + 0x00000000, + 0x00000000, /* led, bb, 0 lines */ + 0x00000000, /* led, k2, 0 lines */ + 0x00000000, + 0x00000000, /* avs_wrap, bb, 0 lines */ + 0x00000000, /* avs_wrap, k2, 0 lines */ + 0x00000000, + 0x00000000, /* bar0_map, bb, 0 lines */ + 0x00000000, /* bar0_map, k2, 0 lines */ + 0x00000000, +}; + /* Win 2 */ #define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL @@ -3589,37 +3892,37 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ (IRO[44].base + ((pf_id) * IRO[44].m1)) -static const struct iro iro_arr[47] = { +static const struct iro iro_arr[49] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb0, 0x80, 0x0, 0x0, 0x80}, - {0x6318, 0x20, 0x0, 0x0, 0x20}, + {0x6518, 0x20, 0x0, 0x0, 0x20}, {0xb00, 0x8, 0x0, 0x0, 0x4}, {0xa80, 0x8, 0x0, 0x0, 0x4}, {0x0, 0x8, 0x0, 0x0, 0x2}, {0x80, 0x8, 0x0, 0x0, 0x4}, {0x84, 0x8, 0x0, 0x0, 0x2}, - {0x4bc0, 0x0, 0x0, 0x0, 0x78}, + {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x3df0, 0x0, 0x0, 0x0, 0x78}, {0x29b0, 0x0, 0x0, 0x0, 0x78}, {0x4c38, 0x0, 0x0, 0x0, 0x78}, {0x4990, 0x0, 0x0, 0x0, 0x78}, - {0x7e48, 0x0, 0x0, 0x0, 0x78}, + {0x7f48, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, - {0x60f8, 0x10, 0x0, 0x0, 0x10}, - {0xb820, 0x30, 0x0, 0x0, 0x30}, + {0x61f8, 0x10, 0x0, 0x0, 0x10}, + {0xbd20, 0x30, 0x0, 0x0, 0x30}, {0x95b8, 0x30, 0x0, 0x0, 0x30}, {0x4b60, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a0, 0x80, 0x4, 0x0, 0x4}, - {0xc8f0, 0x0, 0x0, 0x0, 0x4}, + {0xc7c8, 0x0, 0x0, 0x0, 0x4}, {0x4ba0, 0x80, 0x0, 0x0, 0x20}, - {0x8050, 0x40, 0x0, 0x0, 0x30}, - {0xe770, 0x60, 0x0, 0x0, 0x60}, + {0x8150, 0x40, 
0x0, 0x0, 0x30}, + {0xec70, 0x60, 0x0, 0x0, 0x60}, {0x2b48, 0x80, 0x0, 0x0, 0x38}, - {0xf188, 0x78, 0x0, 0x0, 0x78}, + {0xf1b0, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0xacf0, 0x0, 0x0, 0x0, 0xf0}, - {0xade0, 0x8, 0x0, 0x0, 0x8}, + {0xaef8, 0x0, 0x0, 0x0, 0xf0}, + {0xafe8, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@ -3627,16 +3930,18 @@ static const struct iro iro_arr[47] = { {0x0, 0x8, 0x0, 0x0, 0x8}, {0x200, 0x10, 0x8, 0x0, 0x8}, {0xb78, 0x10, 0x8, 0x0, 0x2}, - {0xd888, 0x38, 0x0, 0x0, 0x24}, - {0x12c38, 0x10, 0x0, 0x0, 0x8}, - {0x11aa0, 0x38, 0x0, 0x0, 0x18}, - {0xa8c0, 0x38, 0x0, 0x0, 0x10}, + {0xd9a8, 0x38, 0x0, 0x0, 0x24}, + {0x12988, 0x10, 0x0, 0x0, 0x8}, + {0x11fa0, 0x38, 0x0, 0x0, 0x18}, + {0xa580, 0x38, 0x0, 0x0, 0x10}, {0x86f8, 0x30, 0x0, 0x0, 0x18}, {0x101f8, 0x10, 0x0, 0x0, 0x10}, - {0xdd08, 0x48, 0x0, 0x0, 0x38}, + {0xde28, 0x48, 0x0, 0x0, 0x38}, {0x10660, 0x20, 0x0, 0x0, 0x20}, {0x2b80, 0x80, 0x0, 0x0, 0x10}, {0x5020, 0x10, 0x0, 0x0, 0x10}, + {0xc9b0, 0x30, 0x0, 0x0, 0x10}, + {0xeec0, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets */ @@ -3724,361 +4029,359 @@ static const struct iro iro_arr[47] = { #define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 #define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 #define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702 #define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 -#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705 -#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 #define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649 -#define 
QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745 
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29839 -#define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 
29885 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29913 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29915 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29919 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29921 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_59_RT_OFFSET 
29965 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29966 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29967 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29968 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29966 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29967 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29968 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29969 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29970 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29971 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29972 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29973 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29974 +#define 
QM_REG_PQTX2PF_15_RT_OFFSET 29975 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29976 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29977 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29978 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29979 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29980 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29981 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29982 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29983 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29984 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29985 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29986 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29987 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29988 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29989 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29990 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29991 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29992 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29993 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29994 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29995 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29996 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29997 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29998 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29999 +#define QM_REG_PQTX2PF_40_RT_OFFSET 30000 +#define QM_REG_PQTX2PF_41_RT_OFFSET 30001 +#define QM_REG_PQTX2PF_42_RT_OFFSET 30002 +#define QM_REG_PQTX2PF_43_RT_OFFSET 30003 +#define QM_REG_PQTX2PF_44_RT_OFFSET 30004 +#define QM_REG_PQTX2PF_45_RT_OFFSET 30005 +#define QM_REG_PQTX2PF_46_RT_OFFSET 30006 +#define QM_REG_PQTX2PF_47_RT_OFFSET 30007 +#define QM_REG_PQTX2PF_48_RT_OFFSET 30008 +#define QM_REG_PQTX2PF_49_RT_OFFSET 30009 +#define QM_REG_PQTX2PF_50_RT_OFFSET 30010 +#define QM_REG_PQTX2PF_51_RT_OFFSET 30011 +#define QM_REG_PQTX2PF_52_RT_OFFSET 30012 +#define QM_REG_PQTX2PF_53_RT_OFFSET 30013 +#define QM_REG_PQTX2PF_54_RT_OFFSET 30014 +#define QM_REG_PQTX2PF_55_RT_OFFSET 30015 +#define QM_REG_PQTX2PF_56_RT_OFFSET 30016 +#define QM_REG_PQTX2PF_57_RT_OFFSET 30017 +#define QM_REG_PQTX2PF_58_RT_OFFSET 30018 +#define QM_REG_PQTX2PF_59_RT_OFFSET 30019 +#define QM_REG_PQTX2PF_60_RT_OFFSET 30020 +#define QM_REG_PQTX2PF_61_RT_OFFSET 30021 +#define QM_REG_PQTX2PF_62_RT_OFFSET 30022 +#define QM_REG_PQTX2PF_63_RT_OFFSET 30023 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 
30254 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30510 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30564 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30767 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30769 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30821 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30823 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30801 +#define QM_REG_RLPFCRD_RT_OFFSET 30855 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30817 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819 +#define QM_REG_RLPFENABLE_RT_OFFSET 30871 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30851 -#define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31011 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31012 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013 +#define QM_REG_WFQPFCRD_RT_OFFSET 30905 +#define QM_REG_WFQPFCRD_RT_SIZE 256 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31161 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31162 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31525 +#define QM_REG_TXPQMAP_RT_OFFSET 31675 #define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32549 +#define QM_REG_WFQVPCRD_RT_OFFSET 32699 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33061 +#define QM_REG_WFQVPMAP_RT_OFFSET 33211 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 +#define QM_REG_VOQCRDLINE_RT_OFFSET 34043 +#define QM_REG_VOQCRDLINE_RT_SIZE 36 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744 +#define 
NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126 #define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926 - -#define RUNTIME_ARRAY_SIZE 33927 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258 +#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308 + +#define RUNTIME_ARRAY_SIZE 34309 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -4307,7 +4610,7 @@ struct xstorm_eth_conn_ag_ctx { #define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 quota; + __le16 ereserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -4340,7 +4643,7 @@ struct xstorm_eth_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 byte16; + u8 ereserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -4627,6 +4930,7 @@ enum eth_error_code { ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC, ETH_FILTERS_VNI_ADD_FAIL_FULL, ETH_FILTERS_VNI_ADD_FAIL_DUP, + ETH_FILTERS_GFT_UPDATE_FAIL, MAX_ETH_ERROR_CODE }; @@ -4879,6 +5183,39 @@ 
enum gft_logic_filter_type { MAX_GFT_LOGIC_FILTER_TYPE }; +struct rx_add_openflow_filter_data { + __le16 action_icid; + u8 priority; + u8 reserved0; + __le32 tenant_id; + __le16 dst_mac_hi; + __le16 dst_mac_mid; + __le16 dst_mac_lo; + __le16 src_mac_hi; + __le16 src_mac_mid; + __le16 src_mac_lo; + __le16 vlan_id; + __le16 l2_eth_type; + u8 ipv4_dscp; + u8 ipv4_frag_type; + u8 ipv4_over_ip; + u8 tenant_id_exists; + __le32 ipv4_dst_addr; + __le32 ipv4_src_addr; + __le16 l4_dst_port; + __le16 l4_src_port; +}; + +struct rx_create_gft_action_data { + u8 vport_id; + u8 reserved[7]; +}; + +struct rx_create_openflow_action_data { + u8 vport_id; + u8 reserved[7]; +}; + /* Ramrod data for rx queue start ramrod */ struct rx_queue_start_ramrod_data { __le16 rx_queue_id; @@ -4956,7 +5293,7 @@ struct rx_update_gft_filter_data { u8 vport_id; u8 filter_type; u8 filter_action; - u8 reserved; + u8 assert_on_error; }; /* Ramrod data for rx queue start ramrod */ @@ -5102,203 +5439,6 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct gft_cam_line { - __le32 camline; -#define GFT_CAM_LINE_VALID_MASK 0x1 -#define GFT_CAM_LINE_VALID_SHIFT 0 -#define GFT_CAM_LINE_DATA_MASK 0x3FFF -#define GFT_CAM_LINE_DATA_SHIFT 1 -#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF -#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 -#define GFT_CAM_LINE_RESERVED1_MASK 0x7 -#define GFT_CAM_LINE_RESERVED1_SHIFT 29 -}; - -struct gft_cam_line_mapped { - __le32 camline; -#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 -#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF -#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 -#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF -#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF -#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 -#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF -#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 -#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 -#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 -}; - -union gft_cam_line_union { - struct gft_cam_line cam_line; - struct gft_cam_line_mapped cam_line_mapped; -}; - -enum gft_profile_ip_version { - GFT_PROFILE_IPV4 = 0, - GFT_PROFILE_IPV6 = 1, - MAX_GFT_PROFILE_IP_VERSION -}; - -enum gft_profile_upper_protocol_type { - GFT_PROFILE_ROCE_PROTOCOL = 0, - GFT_PROFILE_RROCE_PROTOCOL = 1, - GFT_PROFILE_FCOE_PROTOCOL = 2, - GFT_PROFILE_ICMP_PROTOCOL = 3, - GFT_PROFILE_ARP_PROTOCOL = 4, - GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, - GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, - GFT_PROFILE_TCP_PROTOCOL = 7, - GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, - GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, - GFT_PROFILE_UDP_PROTOCOL = 10, - GFT_PROFILE_USER_IP_1_INNER = 11, - GFT_PROFILE_USER_IP_2_OUTER = 12, - GFT_PROFILE_USER_ETH_1_INNER = 13, - GFT_PROFILE_USER_ETH_2_OUTER = 14, - 
GFT_PROFILE_RAW = 15, - MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE -}; - -struct gft_ram_line { - __le32 low32bits; -#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 -#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 -#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 -#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 -#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 -#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 -#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 -#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 -#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 -#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 -#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 -#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 -#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 -#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 -#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 -#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 -#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 -#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 -#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 -#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 -#define GFT_RAM_LINE_TTL_MASK 0x1 -#define GFT_RAM_LINE_TTL_SHIFT 18 -#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 -#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 -#define GFT_RAM_LINE_RESERVED0_MASK 0x1 -#define GFT_RAM_LINE_RESERVED0_SHIFT 20 -#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 -#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 -#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 -#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 -#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 -#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 -#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 -#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 -#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 -#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 -#define GFT_RAM_LINE_DST_PORT_MASK 0x1 -#define GFT_RAM_LINE_DST_PORT_SHIFT 30 -#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 -#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 - __le32 high32bits; -#define GFT_RAM_LINE_DSCP_MASK 0x1 -#define GFT_RAM_LINE_DSCP_SHIFT 0 -#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 -#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 -#define GFT_RAM_LINE_DST_IP_MASK 0x1 -#define GFT_RAM_LINE_DST_IP_SHIFT 2 -#define GFT_RAM_LINE_SRC_IP_MASK 0x1 -#define GFT_RAM_LINE_SRC_IP_SHIFT 3 -#define GFT_RAM_LINE_PRIORITY_MASK 0x1 -#define GFT_RAM_LINE_PRIORITY_SHIFT 4 -#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 -#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 -#define GFT_RAM_LINE_VLAN_MASK 0x1 -#define GFT_RAM_LINE_VLAN_SHIFT 6 -#define GFT_RAM_LINE_DST_MAC_MASK 0x1 -#define GFT_RAM_LINE_DST_MAC_SHIFT 7 -#define 
GFT_RAM_LINE_SRC_MAC_MASK 0x1 -#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 -#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 -#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 -#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF -#define GFT_RAM_LINE_RESERVED1_SHIFT 10 -}; - -struct mstorm_eth_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0; - __le16 word1; - __le32 reg0; - __le32 reg1; -}; - struct xstorm_eth_conn_agctxdq_ext_ldpart { u8 reserved0; u8 eth_state; @@ -5511,7 +5651,7 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart { #define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 quota; + __le16 ereserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5528,6 +5668,43 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart { __le32 reg4; }; +struct mstorm_eth_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + struct xstorm_eth_hw_conn_ag_ctx { u8 reserved0; u8 eth_state; @@ -5740,7 +5917,7 @@ struct xstorm_eth_hw_conn_ag_ctx { #define 
XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 quota; + __le16 ereserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5748,32 +5925,226 @@ struct xstorm_eth_hw_conn_ag_ctx { __le16 conn_dpi; }; -struct mstorm_rdma_task_st_ctx { - struct regpair temp[4]; -}; - -struct rdma_close_func_ramrod_data { - u8 cnq_start_offset; - u8 num_cnqs; - u8 vf_id; - u8 vf_valid; - u8 reserved[4]; -}; - -struct rdma_cnq_params { - __le16 sb_num; - u8 sb_index; - u8 num_pbl_pages; - __le32 reserved; - struct regpair pbl_base_addr; - __le16 queue_zone_num; - u8 reserved1[6]; +struct gft_cam_line { + __le32 camline; +#define GFT_CAM_LINE_VALID_MASK 0x1 +#define GFT_CAM_LINE_VALID_SHIFT 0 +#define GFT_CAM_LINE_DATA_MASK 0x3FFF +#define GFT_CAM_LINE_DATA_SHIFT 1 +#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF +#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 +#define GFT_CAM_LINE_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_RESERVED1_SHIFT 29 }; -struct rdma_create_cq_ramrod_data { - struct regpair cq_handle; - struct regpair pbl_addr; - __le32 max_cqes; +struct gft_cam_line_mapped { + __le32 camline; +#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 +#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 +}; + +union gft_cam_line_union { + struct gft_cam_line cam_line; + struct gft_cam_line_mapped cam_line_mapped; +}; + +enum gft_profile_ip_version { + GFT_PROFILE_IPV4 = 0, + GFT_PROFILE_IPV6 = 1, + MAX_GFT_PROFILE_IP_VERSION +}; + +struct gft_profile_key { + __le16 profile_key; +#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 +#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 +#define GFT_PROFILE_KEY_PF_ID_MASK 0xF +#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 +#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3 +#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 +}; + +enum gft_profile_tunnel_type { + GFT_PROFILE_NO_TUNNEL = 0, + GFT_PROFILE_VXLAN_TUNNEL = 1, + GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2, + GFT_PROFILE_GRE_IP_TUNNEL = 3, + GFT_PROFILE_GENEVE_MAC_TUNNEL = 4, + GFT_PROFILE_GENEVE_IP_TUNNEL = 5, + MAX_GFT_PROFILE_TUNNEL_TYPE +}; + +enum 
gft_profile_upper_protocol_type { + GFT_PROFILE_ROCE_PROTOCOL = 0, + GFT_PROFILE_RROCE_PROTOCOL = 1, + GFT_PROFILE_FCOE_PROTOCOL = 2, + GFT_PROFILE_ICMP_PROTOCOL = 3, + GFT_PROFILE_ARP_PROTOCOL = 4, + GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, + GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, + GFT_PROFILE_TCP_PROTOCOL = 7, + GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, + GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, + GFT_PROFILE_UDP_PROTOCOL = 10, + GFT_PROFILE_USER_IP_1_INNER = 11, + GFT_PROFILE_USER_IP_2_OUTER = 12, + GFT_PROFILE_USER_ETH_1_INNER = 13, + GFT_PROFILE_USER_ETH_2_OUTER = 14, + GFT_PROFILE_RAW = 15, + MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE +}; + +struct gft_ram_line { + __le32 lo; +#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 +#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 +#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 +#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 +#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 +#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 +#define GFT_RAM_LINE_TTL_MASK 0x1 +#define GFT_RAM_LINE_TTL_SHIFT 18 +#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 +#define GFT_RAM_LINE_RESERVED0_MASK 0x1 +#define GFT_RAM_LINE_RESERVED0_SHIFT 20 +#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 +#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 +#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 +#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 +#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 +#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 +#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 +#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 +#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 +#define GFT_RAM_LINE_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_DST_PORT_SHIFT 30 +#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 + __le32 hi; +#define GFT_RAM_LINE_DSCP_MASK 0x1 +#define GFT_RAM_LINE_DSCP_SHIFT 0 +#define 
GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 +#define GFT_RAM_LINE_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_DST_IP_SHIFT 2 +#define GFT_RAM_LINE_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_SRC_IP_SHIFT 3 +#define GFT_RAM_LINE_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_PRIORITY_SHIFT 4 +#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 +#define GFT_RAM_LINE_VLAN_MASK 0x1 +#define GFT_RAM_LINE_VLAN_SHIFT 6 +#define GFT_RAM_LINE_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_DST_MAC_SHIFT 7 +#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 +#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 +#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 +#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF +#define GFT_RAM_LINE_RESERVED1_SHIFT 10 +}; + +enum gft_vlan_select { + INNER_PROVIDER_VLAN = 0, + INNER_VLAN = 1, + OUTER_PROVIDER_VLAN = 2, + OUTER_VLAN = 3, + MAX_GFT_VLAN_SELECT +}; + +struct mstorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct rdma_close_func_ramrod_data { + u8 cnq_start_offset; + u8 num_cnqs; + u8 vf_id; + u8 vf_valid; + u8 reserved[4]; +}; + +struct rdma_cnq_params { + __le16 sb_num; + u8 sb_index; + u8 num_pbl_pages; + __le32 reserved; + struct regpair pbl_base_addr; + __le16 queue_zone_num; + u8 reserved1[6]; +}; + +struct rdma_create_cq_ramrod_data { + struct regpair cq_handle; + struct regpair pbl_addr; + __le32 max_cqes; __le16 pbl_num_pages; __le16 dpi; u8 is_two_level_pbl; @@ -5827,12 +6198,9 @@ struct rdma_init_func_hdr { u8 cnq_start_offset; u8 num_cnqs; u8 cq_ring_mode; - u8 cnp_vlan_priority; - __le32 cnp_send_timeout; - u8 cnp_dscp; u8 vf_id; u8 vf_valid; - u8 reserved[5]; + u8 reserved[3]; }; struct rdma_init_func_ramrod_data { @@ -5856,54 +6224,55 @@ enum rdma_ramrod_cmd_id { }; struct rdma_register_tid_ramrod_data { - __le32 flags; -#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF -#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F -#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18 -#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23 -#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24 -#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30 -#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31 + __le16 flags; +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 
6 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14 u8 flags1; -#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F #define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 -#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 u8 flags2; -#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 u8 key; u8 length_hi; u8 vf_id; u8 vf_valid; __le16 pd; + __le16 reserved2; __le32 length_lo; __le32 itid; - __le32 reserved2; + __le32 reserved3; struct regpair va; struct regpair pbl_base; struct regpair dif_error_addr; struct regpair dif_runt_addr; - __le32 reserved3[2]; + __le32 reserved4[2]; }; struct rdma_resize_cq_output_params { @@ -6149,298 +6518,9 @@ enum rdma_tid_type { MAX_RDMA_TID_TYPE }; -struct mstorm_rdma_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define 
MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0; - __le16 word1; - __le32 reg0; - __le32 reg1; -}; - -struct tstorm_rdma_conn_ag_ctx { +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; - u8 byte1; - u8 flags0; -#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 - u8 flags2; -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 - u8 flags4; -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 
0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le32 reg4; - __le32 reg5; - __le32 reg6; - __le32 reg7; - __le32 reg8; - u8 byte2; - u8 byte3; - __le16 word0; - u8 byte4; - u8 byte5; - __le16 word1; - __le16 word2; - __le16 word3; - __le32 reg9; - __le32 reg10; -}; - -struct tstorm_rdma_task_ag_ctx { - u8 byte0; - u8 byte1; - __le16 word0; - u8 flags0; -#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 - u8 flags2; -#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 - u8 flags3; -#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 - u8 flags4; -#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define 
TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 - u8 byte2; - __le16 word1; - __le32 reg0; - u8 byte3; - u8 byte4; - __le16 word2; - __le16 word3; - __le16 word4; - __le32 reg1; - __le32 reg2; -}; - -struct ustorm_rdma_conn_ag_ctx { - u8 reserved; - u8 byte1; - u8 flags0; -#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 - u8 flags2; -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 - u8 flags3; -#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2; - u8 byte3; - __le16 conn_dpi; - __le16 word1; - __le32 cq_cons; - __le32 cq_se_prod; - __le32 cq_prod; - __le32 reg3; - __le16 int_timeout; - __le16 word3; -}; - -struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { - u8 reserved0; - u8 state; + u8 state; u8 flags0; #define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 #define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 @@ -6469,8 +6549,8 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { #define 
XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 #define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 #define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5 #define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 #define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 #define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 @@ -6647,22 +6727,311 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { #define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 #define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 u8 byte2; - __le16 physical_q0; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; +}; + +struct mstorm_rdma_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct tstorm_rdma_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define 
TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +struct tstorm_rdma_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define 
TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 + u8 flags3; +#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 + u8 flags4; +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 byte2; + __le16 word1; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg1; + __le32 reg2; +}; + +struct ustorm_rdma_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 + u8 flags3; +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 conn_dpi; __le16 word1; - __le16 word2; + __le32 cq_cons; + __le32 cq_se_prod; + __le32 cq_prod; + __le32 reg3; + __le16 int_timeout; __le16 word3; - __le16 word4; - __le16 word5; - __le16 conn_dpi; - u8 byte3; - u8 byte4; - u8 byte5; - u8 byte6; - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 snd_nxt_psn; - __le32 reg4; }; struct xstorm_rdma_conn_ag_ctx { @@ -6696,8 +7065,8 @@ struct xstorm_rdma_conn_ag_ctx { #define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 #define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 #define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 #define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 #define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 #define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 @@ -7093,16 +7462,35 @@ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +struct roce_events_stats { + __le16 silent_drops; + __le16 rnr_naks_sent; + __le32 retransmit_count; + __le32 icrc_error_count; + __le32 reserved; +}; + enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, ROCE_EVENT_QUERY_QP, ROCE_EVENT_DESTROY_QP, + ROCE_EVENT_CREATE_UD_QP, + ROCE_EVENT_DESTROY_UD_QP, MAX_ROCE_EVENT_OPCODE }; +struct roce_init_func_params { + u8 ll2_queue_id; + u8 cnp_vlan_priority; + u8 cnp_dscp; + u8 reserved; + __le32 cnp_send_timeout; +}; + struct roce_init_func_ramrod_data { struct rdma_init_func_ramrod_data rdma; + struct roce_init_func_params roce; }; struct roce_modify_qp_req_ramrod_data { @@ -7222,6 +7610,8 @@ enum roce_ramrod_cmd_id { ROCE_RAMROD_MODIFY_QP, ROCE_RAMROD_QUERY_QP, ROCE_RAMROD_DESTROY_QP, + ROCE_RAMROD_CREATE_UD_QP, + ROCE_RAMROD_DESTROY_UD_QP, MAX_ROCE_RAMROD_CMD_ID }; @@ -7299,13 +7689,6 @@ struct mstorm_roce_resp_conn_ag_ctx { __le32 reg1; }; -enum roce_flavor { - PLAIN_ROCE /* RoCE v1 */ , - RROCE_IPV4 /* RoCE v2 
(Routable RoCE) over ipv4 */ , - RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ , - MAX_ROCE_FLAVOR -}; - struct tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; @@ -7416,8 +7799,8 @@ struct tstorm_roce_resp_conn_ag_ctx { u8 flags0; #define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 #define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 #define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 #define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 @@ -8097,7 +8480,7 @@ struct xstorm_roce_resp_conn_ag_ctx { __le16 irq_prod; __le16 word3; __le16 word4; - __le16 word5; + __le16 ereserved1; __le16 irq_cons; u8 rxmit_opcode; u8 byte4; @@ -8200,6 +8583,812 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +enum roce_flavor { + PLAIN_ROCE, + RROCE_IPV4, + RROCE_IPV6, + MAX_ROCE_FLAVOR +}; + +struct ystorm_iwarp_conn_st_ctx { + __le32 reserved[4]; +}; + +struct pstorm_iwarp_conn_st_ctx { + __le32 reserved[36]; +}; + +struct xstorm_iwarp_conn_st_ctx { + __le32 reserved[44]; +}; + +struct xstorm_iwarp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 + u8 flags2; +#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define 
XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define 
XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define 
XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 sq_comp_cons; + __le16 sq_tx_cons; + __le16 sq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 reg4; + __le32 rewinded_snd_max; + __le32 rd_msn; + __le16 irq_prod_via_msdm; + __le16 irq_cons; + __le16 hq_cons_th_or_mpa_data; + __le16 hq_cons; + __le32 atom_msn; + __le32 orq_cons; + __le32 orq_cons_th; + u8 byte7; + u8 max_ord; + u8 wqe_data_pad_bytes; + u8 former_hq_prod; + u8 irq_prod_via_msem; + u8 byte12; + u8 max_pkt_pdu_size_lo; + u8 max_pkt_pdu_size_hi; + u8 byte15; + u8 e5_reserved; + __le16 e5_reserved4; + __le32 reg10; + __le32 reg11; + __le32 shared_queue_page_addr_lo; + __le32 shared_queue_page_addr_hi; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_iwarp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define 
TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 unaligned_nxt_seq; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 orq_cache_idx; + u8 hq_prod; + __le16 sq_tx_cons_th; + u8 orq_prod; + u8 irq_cons; + __le16 sq_tx_cons; + __le16 conn_dpi; + __le16 rq_prod; + __le32 snd_seq; + __le32 last_hq_sequence; +}; + +struct tstorm_iwarp_conn_st_ctx { + __le32 reserved[60]; +}; + +struct mstorm_iwarp_conn_st_ctx { + __le32 reserved[32]; +}; + +struct ustorm_iwarp_conn_st_ctx { + __le32 reserved[24]; +}; + +struct iwarp_conn_context { + struct ystorm_iwarp_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_iwarp_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_iwarp_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct tstorm_iwarp_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_iwarp_conn_st_ctx mstorm_st_context; + 
struct ustorm_iwarp_conn_st_ctx ustorm_st_context; +}; + +struct iwarp_create_qp_ramrod_data { + u8 flags; +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 + u8 reserved1; + __le16 pd; + __le16 sq_num_pages; + __le16 rq_num_pages; + __le32 reserved3[2]; + struct regpair qp_handle_for_cqe; + struct rdma_srq_id srq_id; + __le32 cq_cid_for_sq; + __le32 cq_cid_for_rq; + __le16 dpi; + __le16 physical_q0; + __le16 physical_q1; + u8 reserved2[6]; +}; + +enum iwarp_eqe_async_opcode { + IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE, + IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED, + IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE, + IWARP_EVENT_TYPE_ASYNC_CID_CLEANED, + IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, + IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, + IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + MAX_IWARP_EQE_ASYNC_OPCODE +}; + +struct iwarp_eqe_data_mpa_async_completion { + __le16 ulp_data_len; + u8 reserved[6]; +}; + +struct iwarp_eqe_data_tcp_async_completion { + __le16 ulp_data_len; + u8 mpa_handshake_mode; + u8 reserved[5]; +}; + +enum iwarp_eqe_sync_opcode { + IWARP_EVENT_TYPE_TCP_OFFLOAD = + 11, + IWARP_EVENT_TYPE_MPA_OFFLOAD, + IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, + IWARP_EVENT_TYPE_CREATE_QP, + IWARP_EVENT_TYPE_QUERY_QP, + IWARP_EVENT_TYPE_MODIFY_QP, + IWARP_EVENT_TYPE_DESTROY_QP, + MAX_IWARP_EQE_SYNC_OPCODE +}; + +enum iwarp_fw_return_code { + IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5, + IWARP_CONN_ERROR_TCP_CONNECTION_RST, + IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT, + IWARP_CONN_ERROR_MPA_ERROR_REJECT, + IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER, + IWARP_CONN_ERROR_MPA_RST, + IWARP_CONN_ERROR_MPA_FIN, + IWARP_CONN_ERROR_MPA_RTR_MISMATCH, + IWARP_CONN_ERROR_MPA_INSUF_IRD, + IWARP_CONN_ERROR_MPA_INVALID_PACKET, + IWARP_CONN_ERROR_MPA_LOCAL_ERROR, + IWARP_CONN_ERROR_MPA_TIMEOUT, + IWARP_CONN_ERROR_MPA_TERMINATE, + IWARP_QP_IN_ERROR_GOOD_CLOSE, + IWARP_QP_IN_ERROR_BAD_CLOSE, + IWARP_EXCEPTION_DETECTED_LLP_CLOSED, + IWARP_EXCEPTION_DETECTED_LLP_RESET, + IWARP_EXCEPTION_DETECTED_IRQ_FULL, + IWARP_EXCEPTION_DETECTED_RQ_EMPTY, + IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, + IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, + IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, + IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC, + IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR, + IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR, + IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED, + MAX_IWARP_FW_RETURN_CODE +}; + +struct iwarp_init_func_params { + u8 ll2_ooo_q_index; + u8 reserved1[7]; +}; + +struct iwarp_init_func_ramrod_data { + struct rdma_init_func_ramrod_data rdma; + struct tcp_init_params tcp; + struct iwarp_init_func_params iwarp; +}; + +enum iwarp_modify_qp_new_state_type { + IWARP_MODIFY_QP_STATE_CLOSING = 1, + IWARP_MODIFY_QP_STATE_ERROR = + 2, + MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE +}; 
+ +struct iwarp_modify_qp_ramrod_data { + __le16 transition_to_state; + __le16 flags; +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 + __le32 reserved3[3]; + __le32 reserved4[8]; +}; + +struct mpa_rq_params { + __le32 ird; + __le32 ord; +}; + +struct mpa_ulp_buffer { + struct regpair addr; + __le16 len; + __le16 reserved[3]; +}; + +struct mpa_outgoing_params { + u8 crc_needed; + u8 reject; + u8 reserved[6]; + struct mpa_rq_params out_rq; + struct mpa_ulp_buffer outgoing_ulp_buffer; +}; + +struct iwarp_mpa_offload_ramrod_data { + struct mpa_outgoing_params common; + __le32 tcp_cid; + u8 mode; + u8 tcp_connect_side; + u8 rtr_pref; +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 + u8 reserved2; + struct mpa_ulp_buffer incoming_ulp_buffer; + struct regpair async_eqe_output_buf; + struct regpair handle_for_async; + struct regpair shared_queue_addr; + u8 stats_counter_id; + u8 reserved3[15]; +}; + +struct iwarp_offload_params { + struct mpa_ulp_buffer incoming_ulp_buffer; + struct regpair async_eqe_output_buf; + struct regpair handle_for_async; + __le16 physical_q0; + __le16 physical_q1; + u8 stats_counter_id; + u8 mpa_mode; + u8 reserved[10]; +}; + +struct iwarp_query_qp_output_params { + __le32 flags; +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + u8 reserved1[4]; +}; + +struct iwarp_query_qp_ramrod_data { + struct regpair output_params_addr; +}; + +enum iwarp_ramrod_cmd_id { + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = + 11, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, + IWARP_RAMROD_CMD_ID_CREATE_QP, + IWARP_RAMROD_CMD_ID_QUERY_QP, + IWARP_RAMROD_CMD_ID_MODIFY_QP, + IWARP_RAMROD_CMD_ID_DESTROY_QP, + MAX_IWARP_RAMROD_CMD_ID +}; + +struct iwarp_rxmit_stats_drv { + struct regpair tx_go_to_slow_start_event_cnt; + struct regpair tx_fast_retransmit_event_cnt; +}; + +struct iwarp_tcp_offload_ramrod_data { + struct iwarp_offload_params iwarp; + struct tcp_offload_params_opt2 tcp; +}; + +enum mpa_negotiation_mode { + MPA_NEGOTIATION_TYPE_BASIC = 1, + MPA_NEGOTIATION_TYPE_ENHANCED = 2, + MAX_MPA_NEGOTIATION_MODE +}; + +enum mpa_rtr_type { + MPA_RTR_TYPE_NONE = 0, + MPA_RTR_TYPE_ZERO_SEND = 1, + MPA_RTR_TYPE_ZERO_WRITE = 2, + MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3, + MPA_RTR_TYPE_ZERO_READ = 4, + MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5, + MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6, + MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7, + MAX_MPA_RTR_TYPE +}; + +struct unaligned_opaque_data { + __le16 first_mpa_offset; + u8 tcp_payload_offset; + u8 flags; +#define 
UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1 +#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F +#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 + __le32 cid; +}; + +struct mstorm_iwarp_conn_ag_ctx { + u8 reserved; + u8 state; + u8 flags0; +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 rcq_cons; + __le16 rcq_cons_th; + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_iwarp_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define 
USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 + u8 flags3; +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 cq_cons; + __le32 cq_se_prod; + __le32 cq_prod; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct ystorm_iwarp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + struct ystorm_fcoe_conn_st_ctx { u8 func_mode; u8 cos; @@ -9222,7 +10411,7 @@ struct xstorm_iscsi_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 byte16; + u8 ereserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -10758,6 +11947,8 @@ struct static_init { u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */ }; +#define NVM_MAGIC_VALUE 0x669955aa + enum nvm_image_type { NVM_TYPE_TIM1 = 0x01, NVM_TYPE_TIM2 = 0x02, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 0a8fde629991..b069ad088269 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -40,31 +40,17 @@ #include "qed_init_ops.h" #include "qed_reg_addr.h" -enum cminterface { - MCM_SEC, - MCM_PRI, - UCM_SEC, - UCM_PRI, - TCM_SEC, - TCM_PRI, - YCM_SEC, - YCM_PRI, - XCM_SEC, - XCM_PRI, - 
NUM_OF_CM_INTERFACES -}; - -/* general constants */ +/* General constants */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \ 0x100) - 1 : 0) #define QM_INVALID_PQ_ID 0xffff -/* feature enable */ +/* Feature enable */ #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 -/* other PQ constants */ +/* Other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 /* WFQ constants */ #define QM_WFQ_UPPER_BOUND 62500000 @@ -106,20 +92,21 @@ enum cminterface { #define BTB_PURE_LB_FACTOR 10 #define BTB_PURE_LB_RATIO 7 /* QM stop command constants */ -#define QM_STOP_PQ_MASK_WIDTH 32 -#define QM_STOP_CMD_ADDR 0x2 -#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 2 +#define QM_STOP_CMD_STRUCT_SIZE 2 #define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 #define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 -#define QM_STOP_CMD_PAUSE_MASK_MASK -1 -#define QM_STOP_CMD_GROUP_ID_OFFSET 1 -#define QM_STOP_CMD_GROUP_ID_SHIFT 16 -#define QM_STOP_CMD_GROUP_ID_MASK 15 -#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 -#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 -#define QM_STOP_CMD_PQ_TYPE_MASK 1 -#define QM_STOP_CMD_MAX_POLL_COUNT 100 -#define QM_STOP_CMD_POLL_PERIOD_US 500 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 + /* QM command macros */ #define QM_CMD_STRUCT_SIZE(cmd) cmd ## \ _STRUCT_SIZE @@ -146,16 +133,17 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { - /* enable RLs for all VOQs */ + /* Enable RLs for all VOQs */ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (1 << MAX_NUM_VOQS) - 1); - /* write RL period */ + /* Write RL period */ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, @@ -167,7 +155,8 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (pf_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, @@ -180,14 +169,15 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en) STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 
1 : 0); if (vport_rl_en) { - /* write RL period (use timer 0 only) */ + /* Write RL period (use timer 0 only) */ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, @@ -200,7 +190,8 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0); - /* set credit threshold for QM bypass flow */ + + /* Set credit threshold for QM bypass flow */ if (vport_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, @@ -208,7 +199,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) } /* Prepare runtime init values to allocate PBF command queue lines for - * the specified VOQ + * the specified VOQ. */ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, u8 voq, u16 cmdq_lines) @@ -232,7 +223,7 @@ static void qed_cmdq_lines_rt_init( { u8 tc, voq, port_id, num_tcs_in_port; - /* clear PBF lines for all VOQs */ + /* Clear PBF lines for all VOQs */ for (voq = 0; voq < MAX_NUM_VOQS; voq++) STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); for (port_id = 0; port_id < max_ports_per_engine; port_id++) { @@ -285,7 +276,7 @@ static void qed_btb_blocks_rt_init( if (!port_params[port_id].active) continue; - /* subtract headroom blocks */ + /* Subtract headroom blocks */ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS; @@ -305,7 +296,7 @@ static void qed_btb_blocks_rt_init( phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port; - /* init physical TCs */ + /* Init physical TCs */ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) != 1) @@ -317,7 +308,7 @@ static void qed_btb_blocks_rt_init( phys_blocks); } - /* init pure LB TC */ + /* Init pure LB TC */ temp = LB_VOQ(port_id); STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp), pure_lb_blocks); @@ -338,24 +329,24 @@ static void qed_tx_pq_map_rt_init( QM_PF_QUEUE_GROUP_SIZE; u16 i, pq_id, pq_group; - /* a bit per Tx PQ indicating if the PQ is associated with a VF */ + /* A bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); u32 mem_addr_4kb = base_mem_addr_4kb; - /* set mapping from PQ group to PF */ + /* Set mapping from PQ group to PF */ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(p_params->pf_id)); - /* set PQ sizes */ + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_pf_cids)); STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_vf_cids)); - /* go over all Tx PQs */ + /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, p_params->max_phys_tcs_per_port); @@ -366,17 +357,18 @@ static void qed_tx_pq_map_rt_init( (p_params->pq_params[i].vport_id < MAX_QM_GLOBAL_RLS); - /* update first Tx PQ of VPORT/TC */ + 
/* Update first Tx PQ of VPORT/TC */ u8 vport_id_in_pf = p_params->pq_params[i].vport_id - p_params->start_vport; u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0]; u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id]; if (first_tx_pq_id == QM_INVALID_PQ_ID) { - /* create new VP PQ */ + /* Create new VP PQ */ pq_ids[p_params->pq_params[i].tc_id] = pq_id; first_tx_pq_id = pq_id; - /* map VP PQ to VOQ and PF */ + + /* Map VP PQ to VOQ and PF */ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, @@ -388,7 +380,7 @@ static void qed_tx_pq_map_rt_init( if (p_params->pq_params[i].rl_valid && !rl_valid) DP_NOTICE(p_hwfn, "Invalid VPORT ID for rate limiter configuration"); - /* fill PQ map entry */ + /* Fill PQ map entry */ memset(&tx_pq_map, 0, sizeof(tx_pq_map)); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); SET_FIELD(tx_pq_map.reg, @@ -400,18 +392,16 @@ static void qed_tx_pq_map_rt_init( SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, p_params->pq_params[i].wrr_group); - /* write PQ map entry to CAM */ + /* Write PQ map entry to CAM */ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&tx_pq_map)); - /* set base address */ + /* Set base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); - /* check if VF PQ */ + + /* If VF PQ, add indication to PQ VF mask */ if (is_vf_pq) { - /* if PQ is associated with a VF, add indication - * to PQ VF mask - */ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE)); @@ -421,16 +411,12 @@ static void qed_tx_pq_map_rt_init( } } - /* store Tx PQ VF mask to size select register */ - for (i = 0; i < num_tx_pq_vf_masks; i++) { - if (tx_pq_vf_mask[i]) { - u32 addr; - - addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; - STORE_RT_REG(p_hwfn, addr, + /* Store Tx PQ VF mask to size select register */ + for (i = 0; i < num_tx_pq_vf_masks; i++) + if (tx_pq_vf_mask[i]) + STORE_RT_REG(p_hwfn, + QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]); - } - } } /* Prepare Other PQ mapping runtime init values for the specified PF */ @@ -440,23 +426,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { - u16 i, pq_id; + u32 pq_size, pq_mem_4kb, mem_addr_4kb; + u16 i, pq_id, pq_group; /* a single other PQ group is used in each PF, * where PQ group i is used in PF i. 
*/ - u16 pq_group = pf_id; - u32 pq_size = num_pf_cids + num_tids; - u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); - u32 mem_addr_4kb = base_mem_addr_4kb; + pq_group = pf_id; + pq_size = num_pf_cids + num_tids; + pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); + mem_addr_4kb = base_mem_addr_4kb; - /* map PQ group to PF */ + /* Map PQ group to PF */ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id)); - /* set PQ sizes */ + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* set base address */ + + /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { STORE_RT_REG(p_hwfn, @@ -485,7 +473,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -514,7 +502,7 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) u32 inc_val = QM_RL_INC_VAL(pf_rl); if (inc_val > QM_RL_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, @@ -535,7 +523,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u32 inc_val; u8 tc, i; - /* go over all PF VPORTs */ + /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { if (!vport_params[i].vport_wfq) @@ -544,7 +532,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq); if (inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, - "Invalid VPORT WFQ weight configuration"); + "Invalid VPORT WFQ weight configuration\n"); return -1; } @@ -578,17 +566,17 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration"); + "Invalid VPORT ID for rate limiter configuration\n"); return -1; } - /* go over all PF VPORTs */ + /* Go over all PF VPORTs */ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); if (inc_val > QM_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, - "Invalid VPORT rate-limit configuration"); + "Invalid VPORT rate-limit configuration\n"); return -1; } @@ -617,7 +605,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); } - /* check if timeout while waiting for SDM command ready */ + /* Check if timeout while waiting for SDM command ready */ if (i == QM_STOP_CMD_MAX_POLL_COUNT) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Timeout when waiting for QM SDM command ready signal\n"); @@ -701,16 +689,16 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, QM_OTHER_PQS_PER_PF; u8 tc, i; - /* clear first Tx PQ ID array for each VPORT */ + /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) for (tc = 0; tc < NUM_OF_TCS; tc++) vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; - /* map Other PQs (if any) */ + /* Map Other PQs (if any) */ qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id, p_params->num_pf_cids, p_params->num_tids, 0); - /* map Tx PQs */ + /* Map Tx PQs */ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); if (p_params->pf_wfq) @@ -736,7 +724,7 @@ int 
qed_init_pf_wfq(struct qed_hwfn *p_hwfn, u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -750,7 +738,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, u32 inc_val = QM_RL_INC_VAL(pf_rl); if (inc_val > QM_RL_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } @@ -766,17 +754,18 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq) { - u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); + u16 vport_pq_id; + u32 inc_val; u8 tc; + inc_val = QM_WFQ_INC_VAL(vport_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration"); + DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n"); return -1; } for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = first_tx_pq_id[tc]; - + vport_pq_id = first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, @@ -793,12 +782,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, if (vport_id >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration"); + "Invalid VPORT ID for rate limiter configuration\n"); return -1; } if (inc_val > QM_RL_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration"); + DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); return -1; } @@ -818,15 +807,15 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; - /* set command's PQ type */ + /* Set command's PQ type */ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { - /* set PQ bit in mask (stop command only) */ + /* Set PQ bit in mask (stop command only) */ if (!is_release_cmd) pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH)); - /* if last PQ or end of PQ mask, write command */ + /* If last PQ or end of PQ mask, write command */ if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) { @@ -962,8 +951,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, ip_geneve_enable ? 
1 : 0); } +#define T_ETH_PACKET_ACTION_GFT_EVENTID 23 +#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25 -#define PARSER_ETH_CONN_CM_HDR (0x0) +#define PARSER_ETH_CONN_CM_HDR 0 #define CAM_LINE_SIZE sizeof(u32) #define RAM_LINE_SIZE sizeof(u64) #define REG_SIZE sizeof(u32) @@ -971,40 +962,26 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { - union gft_cam_line_union camline; - struct gft_ram_line ramline; - u32 *p_ramline, i; - - p_ramline = (u32 *)&ramline; + u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM + + pf_id * RAM_LINE_SIZE; /*stop using gft logic */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0); - memset(&camline, 0, sizeof(union gft_cam_line_union)); - qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, - camline.cam_line_mapped.camline); - memset(&ramline, 0, sizeof(ramline)); - - for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { - u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; - - hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE); - - qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i)); - } + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); + qed_wr(p_hwfn, p_ptt, hw_addr, 0); + qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0); } void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id, bool tcp, bool udp, bool ipv4, bool ipv6) { - u32 rfs_cm_hdr_event_id, *p_ramline; union gft_cam_line_union camline; struct gft_ram_line ramline; - int i; + u32 rfs_cm_hdr_event_id; rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT); - p_ramline = (u32 *)&ramline; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1024,18 +1001,20 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); camline.cam_line_mapped.camline = 0; - /* cam line is now valid!! */ + /* Cam line is now valid!! */ SET_FIELD(camline.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_VALID, 1); /* filters are per PF!! 
*/ SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1); + GFT_CAM_LINE_MAPPED_PF_ID_MASK, + GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); SET_FIELD(camline.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); if (!(tcp && udp)) { SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1); + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); if (tcp) SET_FIELD(camline.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, @@ -1059,34 +1038,38 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, GFT_PROFILE_IPV6); } - /* write characteristics to cam */ + /* Write characteristics to cam */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, camline.cam_line_mapped.camline); camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); - /* write line to RAM - compare to filter 4 tuple */ - ramline.low32bits = 0; - ramline.high32bits = 0; - SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1); - - /* each iteration write to reg */ - for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + - i * REG_SIZE, *(p_ramline + i)); - - /* set default profile so that no filter match will happen */ - ramline.low32bits = 0xffff; - ramline.high32bits = 0xffff; - - for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * - PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE, - *(p_ramline + i)); + /* Write line to RAM - compare to filter 4 tuple */ + ramline.lo = 0; + ramline.hi = 0; + SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1); + + /* Each iteration write to reg */ + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, + ramline.lo); + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4, + ramline.hi); + + /* Set default profile so that no filter match will happen */ + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, + ramline.lo); + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4, + ramline.hi); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 339c91dfa658..3897ac0ae835 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -375,7 +375,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh); p_tcp->srtt = cpu_to_le16(p_conn->srtt); p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var); - p_tcp->ts_time = cpu_to_le32(p_conn->ts_time); p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent); p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age); p_tcp->total_rt = cpu_to_le32(p_conn->total_rt); @@ -400,8 +399,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp->mss = cpu_to_le16(p_conn->mss); p_tcp->snd_wnd_scale = 
p_conn->snd_wnd_scale; p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; - dval = p_conn->ts_ticks_per_second; - p_tcp->ts_ticks_per_second = cpu_to_le32(dval); wval = p_conn->da_timeout_value; p_tcp->da_timeout_value = cpu_to_le16(wval); p_tcp->ack_frequency = p_conn->ack_frequency; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 1ae73b2d6d1e..f14772b9cda3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -592,15 +592,15 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL -#define PGLCS_REG_DBG_SELECT \ +#define PGLCS_REG_DBG_SELECT_K2 \ 0x001d14UL -#define PGLCS_REG_DBG_DWORD_ENABLE \ +#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \ 0x001d18UL -#define PGLCS_REG_DBG_SHIFT \ +#define PGLCS_REG_DBG_SHIFT_K2 \ 0x001d1cUL -#define PGLCS_REG_DBG_FORCE_VALID \ +#define PGLCS_REG_DBG_FORCE_VALID_K2 \ 0x001d20UL -#define PGLCS_REG_DBG_FORCE_FRAME \ +#define PGLCS_REG_DBG_FORCE_FRAME_K2 \ 0x001d24UL #define MISC_REG_RESET_PL_PDA_VMAIN_1 \ 0x008070UL @@ -612,7 +612,7 @@ 0x009050UL #define MISCS_REG_RESET_PL_HV \ 0x009060UL -#define MISCS_REG_RESET_PL_HV_2 \ +#define MISCS_REG_RESET_PL_HV_2_K2 \ 0x009150UL #define DMAE_REG_DBG_SELECT \ 0x00c510UL @@ -644,15 +644,15 @@ 0x0500b0UL #define GRC_REG_DBG_FORCE_FRAME \ 0x0500b4UL -#define UMAC_REG_DBG_SELECT \ +#define UMAC_REG_DBG_SELECT_K2 \ 0x051094UL -#define UMAC_REG_DBG_DWORD_ENABLE \ +#define UMAC_REG_DBG_DWORD_ENABLE_K2 \ 0x051098UL -#define UMAC_REG_DBG_SHIFT \ +#define UMAC_REG_DBG_SHIFT_K2 \ 0x05109cUL -#define UMAC_REG_DBG_FORCE_VALID \ +#define UMAC_REG_DBG_FORCE_VALID_K2 \ 0x0510a0UL -#define UMAC_REG_DBG_FORCE_FRAME \ +#define UMAC_REG_DBG_FORCE_FRAME_K2 \ 0x0510a4UL #define MCP2_REG_DBG_SELECT \ 0x052400UL @@ -924,15 +924,15 @@ 0x4c160cUL #define XYLD_REG_DBG_FORCE_FRAME \ 0x4c1610UL -#define YULD_REG_DBG_SELECT \ +#define YULD_REG_DBG_SELECT_BB_K2 \ 0x4c9600UL -#define YULD_REG_DBG_DWORD_ENABLE \ +#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \ 0x4c9604UL -#define YULD_REG_DBG_SHIFT \ +#define YULD_REG_DBG_SHIFT_BB_K2 \ 0x4c9608UL -#define YULD_REG_DBG_FORCE_VALID \ +#define YULD_REG_DBG_FORCE_VALID_BB_K2 \ 0x4c960cUL -#define YULD_REG_DBG_FORCE_FRAME \ +#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \ 0x4c9610UL #define TMLD_REG_DBG_SELECT \ 0x4d1600UL @@ -994,35 +994,35 @@ 0x580710UL #define CDU_REG_DBG_FORCE_FRAME \ 0x580714UL -#define WOL_REG_DBG_SELECT \ +#define WOL_REG_DBG_SELECT_K2 \ 0x600140UL -#define WOL_REG_DBG_DWORD_ENABLE \ +#define WOL_REG_DBG_DWORD_ENABLE_K2 \ 0x600144UL -#define WOL_REG_DBG_SHIFT \ +#define WOL_REG_DBG_SHIFT_K2 \ 0x600148UL -#define WOL_REG_DBG_FORCE_VALID \ +#define WOL_REG_DBG_FORCE_VALID_K2 \ 0x60014cUL -#define WOL_REG_DBG_FORCE_FRAME \ +#define WOL_REG_DBG_FORCE_FRAME_K2 \ 0x600150UL -#define BMBN_REG_DBG_SELECT \ +#define BMBN_REG_DBG_SELECT_K2 \ 0x610140UL -#define BMBN_REG_DBG_DWORD_ENABLE \ +#define BMBN_REG_DBG_DWORD_ENABLE_K2 \ 0x610144UL -#define BMBN_REG_DBG_SHIFT \ +#define BMBN_REG_DBG_SHIFT_K2 \ 0x610148UL -#define BMBN_REG_DBG_FORCE_VALID \ +#define BMBN_REG_DBG_FORCE_VALID_K2 \ 0x61014cUL -#define BMBN_REG_DBG_FORCE_FRAME \ +#define BMBN_REG_DBG_FORCE_FRAME_K2 \ 0x610150UL -#define NWM_REG_DBG_SELECT \ +#define NWM_REG_DBG_SELECT_K2 \ 0x8000ecUL -#define NWM_REG_DBG_DWORD_ENABLE \ +#define NWM_REG_DBG_DWORD_ENABLE_K2 \ 0x8000f0UL -#define NWM_REG_DBG_SHIFT \ +#define NWM_REG_DBG_SHIFT_K2 \ 0x8000f4UL -#define NWM_REG_DBG_FORCE_VALID \ +#define 
NWM_REG_DBG_FORCE_VALID_K2 \ 0x8000f8UL -#define NWM_REG_DBG_FORCE_FRAME \ +#define NWM_REG_DBG_FORCE_FRAME_K2\ 0x8000fcUL #define PBF_REG_DBG_SELECT \ 0xd80060UL @@ -1244,35 +1244,35 @@ 0x1901534UL #define USEM_REG_DBG_FORCE_FRAME \ 0x1901538UL -#define NWS_REG_DBG_SELECT \ +#define NWS_REG_DBG_SELECT_K2 \ 0x700128UL -#define NWS_REG_DBG_DWORD_ENABLE \ +#define NWS_REG_DBG_DWORD_ENABLE_K2 \ 0x70012cUL -#define NWS_REG_DBG_SHIFT \ +#define NWS_REG_DBG_SHIFT_K2 \ 0x700130UL -#define NWS_REG_DBG_FORCE_VALID \ +#define NWS_REG_DBG_FORCE_VALID_K2 \ 0x700134UL -#define NWS_REG_DBG_FORCE_FRAME \ +#define NWS_REG_DBG_FORCE_FRAME_K2 \ 0x700138UL -#define MS_REG_DBG_SELECT \ +#define MS_REG_DBG_SELECT_K2 \ 0x6a0228UL -#define MS_REG_DBG_DWORD_ENABLE \ +#define MS_REG_DBG_DWORD_ENABLE_K2 \ 0x6a022cUL -#define MS_REG_DBG_SHIFT \ +#define MS_REG_DBG_SHIFT_K2 \ 0x6a0230UL -#define MS_REG_DBG_FORCE_VALID \ +#define MS_REG_DBG_FORCE_VALID_K2 \ 0x6a0234UL -#define MS_REG_DBG_FORCE_FRAME \ +#define MS_REG_DBG_FORCE_FRAME_K2 \ 0x6a0238UL -#define PCIE_REG_DBG_COMMON_SELECT \ +#define PCIE_REG_DBG_COMMON_SELECT_K2 \ 0x054398UL -#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \ +#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \ 0x05439cUL -#define PCIE_REG_DBG_COMMON_SHIFT \ +#define PCIE_REG_DBG_COMMON_SHIFT_K2 \ 0x0543a0UL -#define PCIE_REG_DBG_COMMON_FORCE_VALID \ +#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \ 0x0543a4UL -#define PCIE_REG_DBG_COMMON_FORCE_FRAME \ +#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \ 0x0543a8UL #define MISC_REG_RESET_PL_UA \ 0x008050UL @@ -1328,85 +1328,85 @@ 0x128170cUL #define UCM_REG_SM_TASK_CTX \ 0x1281710UL -#define XSEM_REG_SLOW_DBG_EMPTY \ +#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1401140UL #define XSEM_REG_SYNC_DBG_EMPTY \ 0x1401160UL -#define XSEM_REG_SLOW_DBG_ACTIVE \ +#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1401400UL -#define XSEM_REG_SLOW_DBG_MODE \ +#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1401404UL -#define XSEM_REG_DBG_FRAME_MODE \ +#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1401408UL -#define XSEM_REG_DBG_MODE1_CFG \ +#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1401420UL #define XSEM_REG_FAST_MEMORY \ 0x1440000UL #define YSEM_REG_SYNC_DBG_EMPTY \ 0x1501160UL -#define YSEM_REG_SLOW_DBG_ACTIVE \ +#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1501400UL -#define YSEM_REG_SLOW_DBG_MODE \ +#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1501404UL -#define YSEM_REG_DBG_FRAME_MODE \ +#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1501408UL -#define YSEM_REG_DBG_MODE1_CFG \ +#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1501420UL #define YSEM_REG_FAST_MEMORY \ 0x1540000UL -#define PSEM_REG_SLOW_DBG_EMPTY \ +#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1601140UL #define PSEM_REG_SYNC_DBG_EMPTY \ 0x1601160UL -#define PSEM_REG_SLOW_DBG_ACTIVE \ +#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1601400UL -#define PSEM_REG_SLOW_DBG_MODE \ +#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1601404UL -#define PSEM_REG_DBG_FRAME_MODE \ +#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1601408UL -#define PSEM_REG_DBG_MODE1_CFG \ +#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1601420UL #define PSEM_REG_FAST_MEMORY \ 0x1640000UL -#define TSEM_REG_SLOW_DBG_EMPTY \ +#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1701140UL #define TSEM_REG_SYNC_DBG_EMPTY \ 0x1701160UL -#define TSEM_REG_SLOW_DBG_ACTIVE \ +#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1701400UL -#define TSEM_REG_SLOW_DBG_MODE \ +#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1701404UL -#define TSEM_REG_DBG_FRAME_MODE \ +#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1701408UL -#define 
TSEM_REG_DBG_MODE1_CFG \ +#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1701420UL #define TSEM_REG_FAST_MEMORY \ 0x1740000UL -#define MSEM_REG_SLOW_DBG_EMPTY \ +#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1801140UL #define MSEM_REG_SYNC_DBG_EMPTY \ 0x1801160UL -#define MSEM_REG_SLOW_DBG_ACTIVE \ +#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1801400UL -#define MSEM_REG_SLOW_DBG_MODE \ +#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1801404UL -#define MSEM_REG_DBG_FRAME_MODE \ +#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1801408UL -#define MSEM_REG_DBG_MODE1_CFG \ +#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1801420UL #define MSEM_REG_FAST_MEMORY \ 0x1840000UL -#define USEM_REG_SLOW_DBG_EMPTY \ +#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1901140UL #define USEM_REG_SYNC_DBG_EMPTY \ 0x1901160UL -#define USEM_REG_SLOW_DBG_ACTIVE \ +#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ 0x1901400UL -#define USEM_REG_SLOW_DBG_MODE \ +#define USEM_REG_SLOW_DBG_MODE_BB_K2 \ 0x1901404UL -#define USEM_REG_DBG_FRAME_MODE \ +#define USEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1901408UL -#define USEM_REG_DBG_MODE1_CFG \ +#define USEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1901420UL #define USEM_REG_FAST_MEMORY \ 0x1940000UL @@ -1430,7 +1430,7 @@ 0x340800UL #define BRB_REG_BIG_RAM_DATA \ 0x341500UL -#define SEM_FAST_REG_STALL_0 \ +#define SEM_FAST_REG_STALL_0_BB_K2 \ 0x000488UL #define SEM_FAST_REG_STALLED \ 0x000494UL @@ -1480,37 +1480,37 @@ 4 #define MISC_REG_BLOCK_256B_EN \ 0x008c14UL -#define NWS_REG_NWS_CMU \ +#define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ 0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ 0x0006c4UL -#define MS_REG_MS_CMU \ +#define MS_REG_MS_CMU_K2 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \ - 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ + 0x000210UL +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_REG_PHY0 \ +#define PHY_PCIE_REG_PHY0_K2 \ 0x620000UL -#define PHY_PCIE_REG_PHY1 \ +#define PHY_PCIE_REG_PHY1_K2 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 56289d7cd306..eb1a5cfc49c0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2431,10 +2431,6 @@ qed_rdma_register_tid(void *rdma_cxt, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, params->page_size_log - 12); - SET_FIELD(p_ramrod->flags, - 
RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID, - p_hwfn->p_rdma_info->last_tid); - SET_FIELD(p_ramrod->flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, params->remote_read); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index bc3694e91b85..5abcac64d969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -185,22 +185,20 @@ static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun, } static void -__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, +__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, struct qed_tunn_update_type *tun_type) { *p_tunn_cls = tun_type->tun_cls; - - if (tun_type->b_mode_enabled) - *p_enable_tx_clas = 1; } static void -qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, +qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, struct qed_tunn_update_type *tun_type, - u8 *p_update_port, __le16 *p_port, + u8 *p_update_port, + __le16 *p_port, struct qed_tunn_update_udp_port *p_udp_port) { - __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type); + __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type); if (p_udp_port->b_update_port) { *p_update_port = 1; *p_port = cpu_to_le16(p_udp_port->port); @@ -219,33 +217,27 @@ qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, qed_set_tunn_ports(p_tun, p_src); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, - &p_tunn_cfg->tx_enable_vxlan, &p_tun->vxlan, &p_tunn_cfg->set_vxlan_udp_port_flg, &p_tunn_cfg->vxlan_udp_port, &p_tun->vxlan_port); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, - &p_tunn_cfg->tx_enable_l2geneve, &p_tun->l2_geneve, &p_tunn_cfg->set_geneve_udp_port_flg, &p_tunn_cfg->geneve_udp_port, &p_tun->geneve_port); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, - &p_tunn_cfg->tx_enable_ipgeneve, &p_tun->ip_geneve); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, - &p_tunn_cfg->tx_enable_l2gre, &p_tun->l2_gre); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, - &p_tunn_cfg->tx_enable_ipgre, &p_tun->ip_gre); p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; - p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls; } static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, @@ -289,29 +281,24 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, qed_set_tunn_ports(p_tun, p_src); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, - &p_tunn_cfg->tx_enable_vxlan, &p_tun->vxlan, &p_tunn_cfg->set_vxlan_udp_port_flg, &p_tunn_cfg->vxlan_udp_port, &p_tun->vxlan_port); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, - &p_tunn_cfg->tx_enable_l2geneve, &p_tun->l2_geneve, &p_tunn_cfg->set_geneve_udp_port_flg, &p_tunn_cfg->geneve_udp_port, &p_tun->geneve_port); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, - &p_tunn_cfg->tx_enable_ipgeneve, &p_tun->ip_geneve); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, - &p_tunn_cfg->tx_enable_l2gre, &p_tun->l2_gre); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, - &p_tunn_cfg->tx_enable_ipgre, &p_tun->ip_gre); } diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index d6978cbc56f0..7658138f2283 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -2099,14 +2099,16 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) /* Update header info */ SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE); - if 
(sc->sc_data_direction == DMA_TO_DEVICE) { - SET_FIELD(cmd_pdu_header.flags_attr, - ISCSI_CMD_HDR_WRITE, 1); - task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; - } else { - SET_FIELD(cmd_pdu_header.flags_attr, - ISCSI_CMD_HDR_READ, 1); - task_type = ISCSI_TASK_TYPE_INITIATOR_READ; + if (hdr->cdb[0] != TEST_UNIT_READY) { + if (sc->sc_data_direction == DMA_TO_DEVICE) { + SET_FIELD(cmd_pdu_header.flags_attr, + ISCSI_CMD_HDR_WRITE, 1); + task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; + } else { + SET_FIELD(cmd_pdu_header.flags_attr, + ISCSI_CMD_HDR_READ, 1); + task_type = ISCSI_TASK_TYPE_INITIATOR_READ; + } } cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); @@ -2117,7 +2119,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length); cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength); cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn); - cmd_pdu_header.opcode = hdr->opcode; + cmd_pdu_header.hdr_first_byte = hdr->opcode; qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb); /* Fill tx AHS and rx buffer */ diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c index fd354d4e03eb..7df32a68bd54 100644 --- a/drivers/scsi/qedi/qedi_fw_api.c +++ b/drivers/scsi/qedi/qedi_fw_api.c @@ -578,7 +578,8 @@ int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params, (struct iscsi_common_hdr *)cmd_header, tx_sgl_params, cmd_params, dif_task_params); - else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ)) + else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) || + (task_params->rx_io_size == 0 && task_params->tx_io_size == 0)) return init_rw_iscsi_task(task_params, ISCSI_TASK_TYPE_INITIATOR_READ, conn_params, diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 3548d46f9b27..0c8ccffa4c38 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1461,9 +1461,6 @@ static const struct { { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, "out of sge error" }, - { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, - "tcp seg ip options error" - }, { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, "tcp ip fragment error" }, diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index fbab6e0514f0..a567cbf8c5b4 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -96,12 +96,12 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 -#define MAX_NUM_LL2_RX_QUEUES 32 -#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 +#define MAX_NUM_LL2_RX_QUEUES 48 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 15 -#define FW_REVISION_VERSION 3 +#define FW_MINOR_VERSION 20 +#define FW_REVISION_VERSION 0 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -181,6 +181,14 @@ #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) + +#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) +#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) +#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) +#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) +#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) +#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) + /*****************/ /* DQ CONSTANTS */ /*****************/ @@ -457,7 +465,6 @@ #define PXP_BAR_DQ 1 /* PTT and GTT */ -#define PXP_NUM_PF_WINDOWS 12 #define PXP_PER_PF_ENTRY_SIZE 8 #define PXP_NUM_GLOBAL_WINDOWS 243 #define PXP_GLOBAL_ENTRY_SIZE 4 @@ -482,6 +489,7 @@ #define PXP_PF_ME_OPAQUE_ADDR 0x1f8 #define 
PXP_PF_ME_CONCRETE_ADDR 0x1fc +#define PXP_NUM_PF_WINDOWS 12 #define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS #define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 @@ -618,16 +626,21 @@ /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 -/******************/ -/* SDMs CONSTANTS */ -/******************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 -#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 -#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7 +#define PRM_DMA_PAD_BYTES_NUM 2 +/*****************/ +/* SDMs CONSTANTS */ +/*****************/ + +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 + +/********************/ +/* Completion types */ +/********************/ #define SDM_COMP_TYPE_NONE 0 #define SDM_COMP_TYPE_WAKE_THREAD 1 @@ -638,10 +651,11 @@ #define SDM_COMP_TYPE_INDICATE_ERROR 6 #define SDM_COMP_TYPE_RELEASE_THREAD 7 #define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 -/******************/ -/* PBF CONSTANTS */ -/******************/ +/*****************/ +/* PBF Constants */ +/*****************/ /* Number of PBF command queue lines. Each line is 32B. */ #define PBF_MAX_CMD_LINES 3328 @@ -861,7 +875,7 @@ enum db_dest { /* Enum of doorbell DPM types */ enum db_dpm_type { DPM_LEGACY, - DPM_ROCE, + DPM_RDMA, DPM_L2_INLINE, DPM_L2_BD, MAX_DB_DPM_TYPE @@ -884,8 +898,8 @@ struct db_l2_dpm_data { #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 -#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 -#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 +#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1 +#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31 }; /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ @@ -931,31 +945,33 @@ struct db_pwm_addr { }; /* Parameters to RoCE firmware, passed in EDPM doorbell */ -struct db_roce_dpm_params { +struct db_rdma_dpm_params { __le32 params; -#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF -#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; /* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ -struct db_roce_dpm_data { +struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; - struct db_roce_dpm_params params; + struct db_rdma_dpm_params params; }; /* Igu interrupt command */ @@ -1026,6 +1042,42 @@ struct parsing_and_err_flags { #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +struct parsing_err_flags { + __le16 flags; +#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 +}; + struct pb_context { __le32 crc[4]; }; @@ -1288,39 +1340,56 @@ struct tdif_task_context { struct timers_context { __le32 logical_client_0; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 __le32 logical_client_1; -#define 
TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 __le32 logical_client_2; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 __le32 host_expiration_fields; -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 -#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 -#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; + +enum tunnel_next_protocol { + e_unknown = 0, + e_l2 = 1, + e_ipv4 = 2, + e_ipv6 = 3, + MAX_TUNNEL_NEXT_PROTOCOL +}; + #endif /* __COMMON_HSI__ */ #endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 34d93eb5bfba..cb06e6e368e1 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -75,7 +75,8 @@ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 947a635d04bb..12fc9e788eea 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -13,7 +13,6 @@ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) struct fcoe_abts_pkt { __le32 abts_rsp_fc_payload_lo; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 69949f8e354b..85e086cba639 100644 --- a/include/linux/qed/iscsi_common.h 
+++ b/include/linux/qed/iscsi_common.h @@ -75,25 +75,13 @@ #define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ -#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) -#define ISCSI_OPCODE_NOP_OUT ( \ - ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) -#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) -#define ISCSI_OPCODE_SCSI_CMD ( \ - ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) -#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) -#define ISCSI_OPCODE_TMF_REQUEST ( \ - ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) -#define ISCSI_OPCODE_LOGIN_REQUEST ( \ - ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) -#define ISCSI_OPCODE_TEXT_REQUEST ( \ - ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_DATA_OUT (5) -#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) -#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ - ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_NOP_OUT (0) +#define ISCSI_OPCODE_SCSI_CMD (1) +#define ISCSI_OPCODE_TMF_REQUEST (2) +#define ISCSI_OPCODE_LOGIN_REQUEST (3) +#define ISCSI_OPCODE_TEXT_REQUEST (4) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ #define ISCSI_OPCODE_NOP_IN (0x20) @@ -172,17 +160,23 @@ struct iscsi_async_msg_hdr { struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 opcode; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 __le32 hdr_second_dword; #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 @@ -790,9 +784,9 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, ISCSI_CONN_ERROR_DATA_OVERRUN, ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, + ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_PRS_ERRORS, + ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, @@ -1304,22 +1298,6 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_total_pdu_cnt; }; -struct iscsi_db_data { - u8 params; -#define ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 
sq_prod; -}; - struct tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; @@ -1398,5 +1376,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; #endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 72c770f9f666..a9b3050f469c 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -42,7 +42,7 @@ #define RDMA_MAX_SGE_PER_SQ_WQE (4) #define RDMA_MAX_SGE_PER_RQ_WQE (4) -#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 866f063026de..fe6a33e45977 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -37,6 +37,8 @@ #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) #define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index a5e843268f0e..dbf7a43c3e1f 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -111,7 +111,6 @@ struct tcp_offload_params { __le32 snd_wnd; __le32 rcv_wnd; __le32 snd_wl1; - __le32 ts_time; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; @@ -122,7 +121,7 @@ struct tcp_offload_params { u8 ka_probe_cnt; u8 rt_cnt; __le16 rtt_var; - __le16 reserved2; + __le16 fw_internal; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; @@ -130,7 +129,7 @@ struct tcp_offload_params { u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 ts_ticks_per_second; + __le32 reserved3[2]; }; struct tcp_offload_params_opt2 { -- cgit v1.2.3 From 9617813dba5b6c112922c60cd2bc57c6e11ae907 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 18 May 2017 15:44:37 +0200 Subject: skbuff: add stub to help computing crc32c on SCTP packets sctp_compute_checksum requires the crc32c symbol (provided by libcrc32c), so it can't be used in net core. As has been done previously with other symbols (e.g. ipv6_dst_lookup), introduce a stub struct skb_checksum_ops to allow computation of crc32c checksum in net core after sctp.ko (and thus libcrc32c) has been loaded. Signed-off-by: Davide Caratti Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 2 ++ net/core/skbuff.c | 26 ++++++++++++++++++++++++++ net/sctp/offload.c | 6 ++++++ 3 files changed, 34 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7c0cb2ce8b01..b1f46a0d18e2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3076,6 +3076,8 @@ struct skb_checksum_ops { __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); }; +extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; + __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 346d3e85dfbc..d5c98117cbce 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2243,6 +2243,32 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, } EXPORT_SYMBOL(skb_copy_and_csum_bits); +static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) +{ + net_warn_ratelimited( + "%s: attempt to compute crc32c without libcrc32c.ko\n", + __func__); + return 0; +} + +static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, + int offset, int len) +{ + net_warn_ratelimited( + "%s: attempt to compute crc32c without libcrc32c.ko\n", + __func__); + return 0; +} + +static const struct skb_checksum_ops default_crc32c_ops = { + .update = warn_crc32c_csum_update, + .combine = warn_crc32c_csum_combine, +}; + +const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = + &default_crc32c_ops; +EXPORT_SYMBOL(crc32c_csum_stub); + /** * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() * @from: source buffer diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 4f5a2b580aa5..b67198429db5 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -98,6 +98,11 @@ static const struct net_offload sctp6_offload = { }, }; +static const struct skb_checksum_ops crc32c_csum_ops = { + .update = sctp_csum_update, + .combine = sctp_csum_combine, +}; + int __init sctp_offload_init(void) { int ret; @@ -110,6 +115,7 @@ int __init sctp_offload_init(void) if (ret) goto ipv4; + crc32c_csum_stub = &crc32c_csum_ops; return ret; ipv4: -- cgit v1.2.3 From b72b5bf6a8fc9065f270ae135bbd47abb9d96790 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 18 May 2017 15:44:38 +0200 Subject: net: introduce skb_crc32c_csum_help skb_crc32c_csum_help is like skb_checksum_help, but it is designed for checksumming SCTP packets using crc32c (see RFC3309), provided that libcrc32c.ko has been loaded before. In case libcrc32c is not loaded, invoking skb_crc32c_csum_help on a skb results in one of the following printouts: warn_crc32c_csum_update: attempt to compute crc32c without libcrc32c.ko warn_crc32c_csum_combine: attempt to compute crc32c without libcrc32c.ko Signed-off-by: Davide Caratti Signed-off-by: David S.
Miller --- include/linux/netdevice.h | 1 + include/linux/skbuff.h | 3 ++- net/core/dev.c | 41 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0150b2dd3031..abbc72e09f11 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3931,6 +3931,7 @@ void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); +int skb_crc32c_csum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b1f46a0d18e2..62d62964c743 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -193,7 +193,8 @@ * accordingly. Note the there is no indication in the skbuff that the * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports * both IP checksum offload and SCTP CRC offload must verify which offload - * is configured for a packet presumably by inspecting packet headers. + * is configured for a packet presumably by inspecting packet headers; in + * case, skb_crc32c_csum_help is provided to compute CRC on SCTP packets. * * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack diff --git a/net/core/dev.c b/net/core/dev.c index acd594c56f0a..8356d5f05f89 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -143,6 +143,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -2612,6 +2613,46 @@ out: } EXPORT_SYMBOL(skb_checksum_help); +int skb_crc32c_csum_help(struct sk_buff *skb) +{ + __le32 crc32c_csum; + int ret = 0, offset, start; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + goto out; + + if (unlikely(skb_is_gso(skb))) + goto out; + + /* Before computing a checksum, we should make sure no frag could + * be modified by an external entity : checksum could be wrong. + */ + if (unlikely(skb_has_shared_frag(skb))) { + ret = __skb_linearize(skb); + if (ret) + goto out; + } + start = skb_checksum_start_offset(skb); + offset = start + offsetof(struct sctphdr, checksum); + if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { + ret = -EINVAL; + goto out; + } + if (skb_cloned(skb) && + !skb_clone_writable(skb, offset + sizeof(__le32))) { + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (ret) + goto out; + } + crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, + skb->len - start, ~(__u32)0, + crc32c_csum_stub)); + *(__le32 *)(skb->data + offset) = crc32c_csum; + skb->ip_summed = CHECKSUM_NONE; +out: + return ret; +} + __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { __be16 type = skb->protocol; -- cgit v1.2.3 From 219f1d79871257e9603f504dce0fe8ebf47aad08 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 18 May 2017 15:44:39 +0200 Subject: sk_buff: remove support for csum_bad in sk_buff This bit was introduced with commit 5a21232983aa ("net: Support for csum_bad in skbuff") to reduce the stack workload when processing RX packets carrying a wrong Internet Checksum. Up to now, only one driver and GRO core are setting it. Suggested-by: Tom Herbert Signed-off-by: Davide Caratti Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 2 +- include/linux/netdevice.h | 4 +--- include/linux/skbuff.h | 23 ++--------------------- net/bridge/netfilter/nft_reject_bridge.c | 5 +---- net/core/dev.c | 3 --- net/ipv4/netfilter/nf_reject_ipv4.c | 2 +- net/ipv6/netfilter/nf_reject_ipv6.c | 3 --- 7 files changed, 6 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 3a8a4aa13687..9a0817938eca 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -223,7 +223,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) skb->protocol = eth_type_trans(skb, ndev); if (unlikely(buff->is_cso_err)) { ++self->stats.rx.errors; - __skb_mark_checksum_bad(skb); + skb->ip_summed = CHECKSUM_NONE; } else { if (buff->is_ip_cso) { __skb_incr_checksum_unnecessary(skb); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index abbc72e09f11..c1611ace5336 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2573,9 +2573,7 @@ static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ __ret = __skb_gro_checksum_validate_complete(skb, \ compute_pseudo(skb, proto)); \ - if (__ret) \ - __skb_mark_checksum_bad(skb); \ - else \ + if (!__ret) \ skb_gro_incr_csum_unnecessary(skb); \ __ret; \ }) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 62d62964c743..c38f890d425e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -685,7 +685,7 @@ struct sk_buff { __u8 csum_valid:1; __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 csum_bad:1; + __u8 __csum_bad_unused:1; /* one bit hole */ __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE @@ -3336,21 +3336,6 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) } } -static inline void __skb_mark_checksum_bad(struct sk_buff *skb) -{ - /* Mark current checksum as bad (typically called from GRO - * path). In the case that ip_summed is CHECKSUM_NONE - * this must be the first checksum encountered in the packet. - * When ip_summed is CHECKSUM_UNNECESSARY, this is the first - * checksum after the last one validated. For UDP, a zero - * checksum can not be marked as bad. - */ - - if (skb->ip_summed == CHECKSUM_NONE || - skb->ip_summed == CHECKSUM_UNNECESSARY) - skb->csum_bad = 1; -} - /* Check if we need to perform checksum complete validation. 
* * Returns true if checksum complete is needed, false otherwise @@ -3404,9 +3389,6 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } - } else if (skb->csum_bad) { - /* ip_summed == CHECKSUM_NONE in this case */ - return (__force __sum16)1; } skb->csum = psum; @@ -3466,8 +3448,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) static inline bool __skb_checksum_convert_check(struct sk_buff *skb) { - return (skb->ip_summed == CHECKSUM_NONE && - skb->csum_valid && !skb->csum_bad); + return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); } static inline void __skb_checksum_convert(struct sk_buff *skb, diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 346ef6b00b8f..c16dd3a47fc6 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -111,7 +111,7 @@ static void nft_reject_br_send_v4_unreach(struct net *net, __wsum csum; u8 proto; - if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) + if (!nft_bridge_iphdr_validate(oldskb)) return; /* IP header checks: fragment. */ @@ -226,9 +226,6 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) __be16 fo; u8 proto = ip6h->nexthdr; - if (skb->csum_bad) - return false; - if (skb_csum_unnecessary(skb)) return true; diff --git a/net/core/dev.c b/net/core/dev.c index 8356d5f05f89..f0281ff45e77 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4678,9 +4678,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (netif_elide_gro(skb->dev)) goto normal; - if (skb->csum_bad) - goto normal; - gro_list_prepare(napi, skb); rcu_read_lock(); diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 7cd8d0d918f8..6f8d9e5e062b 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -172,7 +172,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook) struct iphdr *iph = ip_hdr(skb_in); u8 proto; - if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET)) + if (iph->frag_off & htons(IP_OFFSET)) return; if (skb_csum_unnecessary(skb_in)) { diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index eedee5d108d9..f63b18e05c69 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -220,9 +220,6 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook) __be16 fo; u8 proto; - if (skb->csum_bad) - return false; - if (skb_csum_unnecessary(skb)) return true; -- cgit v1.2.3 From dba003067a43a9699bef0c4bdbe320ece5a109b8 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 18 May 2017 15:44:40 +0200 Subject: net: use skb->csum_not_inet to identify packets needing crc32c skb->csum_not_inet carries the indication on which algorithm is needed to compute checksum on skb in the transmit path, when skb->ip_summed is equal to CHECKSUM_PARTIAL. If skb carries a SCTP packet and crc32c hasn't been yet written in L4 header, skb->csum_not_inet is assigned to 1; otherwise, assume Internet Checksum is needed and thus set skb->csum_not_inet to 0. Suggested-by: Tom Herbert Signed-off-by: Davide Caratti Acked-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 16 +++++++++------- net/core/dev.c | 1 + net/sched/act_csum.c | 1 + net/sctp/offload.c | 1 + net/sctp/output.c | 1 + 5 files changed, 13 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c38f890d425e..a43d2086bb7f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -189,12 +189,13 @@ * * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack - * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset - * accordingly. Note the there is no indication in the skbuff that the - * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports - * both IP checksum offload and SCTP CRC offload must verify which offload - * is configured for a packet presumably by inspecting packet headers; in - * case, skb_crc32c_csum_help is provided to compute CRC on SCTP packets. + * will set set csum_start and csum_offset accordingly, set ip_summed to + * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in + * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. + * A driver that supports both IP checksum offload and SCTP CRC32c offload + * must verify which offload is configured for a packet by testing the + * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve + * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1. * * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack @@ -557,6 +558,7 @@ typedef unsigned char *sk_buff_data_t; * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking @@ -685,7 +687,7 @@ struct sk_buff { __u8 csum_valid:1; __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 __csum_bad_unused:1; /* one bit hole */ + __u8 csum_not_inet:1; __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE diff --git a/net/core/dev.c b/net/core/dev.c index f0281ff45e77..71107d1f3051 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2649,6 +2649,7 @@ int skb_crc32c_csum_help(struct sk_buff *skb) crc32c_csum_stub)); *(__le32 *)(skb->data + offset) = crc32c_csum; skb->ip_summed = CHECKSUM_NONE; + skb->csum_not_inet = 0; out: return ret; } diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index ab6fdbd34db7..3317a2f579da 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -350,6 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl, sctph->checksum = sctp_compute_cksum(skb, skb_network_offset(skb) + ihl); skb->ip_summed = CHECKSUM_NONE; + skb->csum_not_inet = 0; return 1; } diff --git a/net/sctp/offload.c b/net/sctp/offload.c index b67198429db5..275925b93b29 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -35,6 +35,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb) { skb->ip_summed = CHECKSUM_NONE; + skb->csum_not_inet = 0; return sctp_compute_cksum(skb, skb_transport_offset(skb)); } diff --git a/net/sctp/output.c b/net/sctp/output.c index 1409a875ad8e..e2edf2ebbade 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -538,6 +538,7 @@ merge: } else { chksum: head->ip_summed = 
CHECKSUM_PARTIAL; + head->csum_not_inet = 1; head->csum_start = skb_transport_header(head) - head->head; head->csum_offset = offsetof(struct sctphdr, checksum); } -- cgit v1.2.3 From 43c26a1a45938624fb9301e8bf7dfabbed293619 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 18 May 2017 15:44:41 +0200 Subject: net: more accurate checksumming in validate_xmit_skb() skb_csum_hwoffload_help() uses netdev features and skb->csum_not_inet to determine if skb needs software computation of Internet Checksum or crc32c (or nothing, if this computation can be done by the hardware). Use it in place of skb_checksum_help() in validate_xmit_skb() to avoid corruption of non-GSO SCTP packets having skb->ip_summed equal to CHECKSUM_PARTIAL. While at it, remove references to skb_csum_off_chk* functions, since they are not present anymore in Linux - see commit cf53b1da73bd ("Revert "net: Add driver helper functions to determine checksum offloadability""). Signed-off-by: Davide Caratti Signed-off-by: David S. Miller --- Documentation/networking/checksum-offloads.txt | 11 +++++++---- include/linux/netdevice.h | 3 +++ include/linux/skbuff.h | 13 +++++-------- net/core/dev.c | 14 ++++++++++++-- 4 files changed, 27 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt index 56e36861245f..d52d191bbb0c 100644 --- a/Documentation/networking/checksum-offloads.txt +++ b/Documentation/networking/checksum-offloads.txt @@ -35,6 +35,9 @@ This interface only allows a single checksum to be offloaded. Where encapsulation is used, the packet may have multiple checksum fields in different header layers, and the rest will have to be handled by another mechanism such as LCO or RCO. +CRC32c can also be offloaded using this interface, by means of filling + skb->csum_start and skb->csum_offset as described above, and setting + skb->csum_not_inet: see skbuff.h comment (section 'D') for more details. No offloading of the IP header checksum is performed; it is always done in software. This is OK because when we build the IP header, we obviously have it in cache, so summing it isn't expensive. It's also rather short. @@ -49,9 +52,9 @@ A driver declares its offload capabilities in netdev->hw_features; see and csum_offset given in the SKB; if it tries to deduce these itself in hardware (as some NICs do) the driver should check that the values in the SKB match those which the hardware will deduce, and if not, fall back to - checksumming in software instead (with skb_checksum_help or one of the - skb_csum_off_chk* functions as mentioned in include/linux/skbuff.h). This - is a pain, but that's what you get when hardware tries to be clever. + checksumming in software instead (with skb_csum_hwoffload_help() or one of + the skb_checksum_help() / skb_crc32c_csum_help functions, as mentioned in + include/linux/skbuff.h). The stack should, for the most part, assume that checksum offload is supported by the underlying device. The only place that should check is @@ -60,7 +63,7 @@ The stack should, for the most part, assume that checksum offload is may include other offloads besides TX Checksum Offload) and, if they are not supported or enabled on the device (determined by netdev->features), performs the corresponding offload in software. In the case of TX - Checksum Offload, that means calling skb_checksum_help(skb). + Checksum Offload, that means calling skb_csum_hwoffload_help(skb, features).
LCO: Local Checksum Offload diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c1611ace5336..f8f7cd52a0a0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3930,6 +3930,9 @@ void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); int skb_crc32c_csum_help(struct sk_buff *skb); +int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features); + struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a43d2086bb7f..43d7ca07b2ff 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -162,14 +162,11 @@ * * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate - * checksum offload capability. If a device has limited checksum capabilities - * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as - * described above) a helper function can be called to resolve - * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper - * function takes a spec argument that describes the protocol layer that is - * supported for checksum offload and can be called for each packet. If a - * packet does not match the specification for offload, skb_checksum_help - * is called to resolve the checksum. + * checksum offload capability. + * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based + * on network device checksumming capabilities: if a packet does not match + * them, skb_checksum_help or skb_crc32c_help (depending on the value of + * csum_not_inet, see item D.) is called to resolve the checksum. * * CHECKSUM_NONE: * diff --git a/net/core/dev.c b/net/core/dev.c index 71107d1f3051..bb136f726890 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2996,6 +2996,17 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, return skb; } +int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features) +{ + if (unlikely(skb->csum_not_inet)) + return !!(features & NETIF_F_SCTP_CRC) ? 0 : + skb_crc32c_csum_help(skb); + + return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb); +} +EXPORT_SYMBOL(skb_csum_hwoffload_help); + static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) { netdev_features_t features; @@ -3034,8 +3045,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device else skb_set_transport_header(skb, skb_checksum_start_offset(skb)); - if (!(features & NETIF_F_CSUM_MASK) && - skb_checksum_help(skb)) + if (skb_csum_hwoffload_help(skb, features)) goto out_kfree_skb; } } -- cgit v1.2.3 From b4759dcdcd8466e70f01ff07f33e17cd93131d34 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 18 May 2017 15:44:43 +0200 Subject: sk_buff.h: improve description of CHECKSUM_{COMPLETE, UNNECESSARY} Add FCoE to the list of protocols that can set CHECKSUM_UNNECESSARY; add a note to CHECKSUM_COMPLETE section to specify that it does not apply to SCTP and FCoE protocols. Suggested-by: Tom Herbert Signed-off-by: Davide Caratti Acked-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 43d7ca07b2ff..1713e4b7ea9f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -109,6 +109,7 @@ * may perform further validation in this case. * GRE: only if the checksum is present in the header. * SCTP: indicates the CRC in SCTP header has been validated. + * FCOE: indicates the CRC in FC frame has been validated. * * skb->csum_level indicates the number of consecutive checksums found in * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. @@ -126,8 +127,10 @@ * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the * hardware doesn't need to parse L3/L4 headers to implement this. * - * Note: Even if device supports only some protocols, but is able to produce - * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * Notes: + * - Even if device supports only some protocols, but is able to produce + * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols. * * CHECKSUM_PARTIAL: * -- cgit v1.2.3 From 32d0f7830d9be5b1652a718e050d808b4908155f Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Thu, 18 May 2017 15:13:43 -0700 Subject: phy: Add helper function to check phy interface mode Added helper function that checks phy_mode is RGMII (all variants) 'bool phy_interface_mode_is_rgmii(phy_interface_t mode)' Changed the following function, to use the above. 'bool phy_interface_is_rgmii(struct phy_device *phydev)' Signed-off-by: Iyappan Subramanian Suggested-by: Florian Fainelli Suggested-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phy.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/phy.h b/include/linux/phy.h index 54ef45823fc1..5a808a26e4cf 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -715,6 +715,17 @@ static inline bool phy_is_internal(struct phy_device *phydev) return phydev->is_internal; } +/** + * phy_interface_mode_is_rgmii - Convenience function for testing if a + * PHY interface mode is RGMII (all variants) + * @mode: the phy_interface_t enum + */ +static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) +{ + return mode >= PHY_INTERFACE_MODE_RGMII && + mode <= PHY_INTERFACE_MODE_RGMII_TXID; +}; + /** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) @@ -722,8 +733,7 @@ static inline bool phy_is_internal(struct phy_device *phydev) */ static inline bool phy_interface_is_rgmii(struct phy_device *phydev) { - return phydev->interface >= PHY_INTERFACE_MODE_RGMII && - phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; + return phy_interface_mode_is_rgmii(phydev->interface); }; /* -- cgit v1.2.3 From 4123109050a869a8871e58a50f28f383d41e49ad Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 5 May 2017 16:13:02 -0700 Subject: nvme-fc: correct port role bits FC Port roles is a bit mask, not individual values. Correct nvme definitions to unique bits. 
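For illustration, a minimal sketch of the failure mode being fixed (illustrative only, not part of the patch; handle_target_port() is a hypothetical helper). With the old values, 0x11 and 0x12 both contain the 0x10 initiator bit, so a mask test could match the wrong port type:

	/* Old, overlapping values:
	 *   TARGET    = 0x11 = FC_PORT_ROLE_NVME_INITIATOR | 0x01
	 *   DISCOVERY = 0x12 = FC_PORT_ROLE_NVME_INITIATOR | 0x02
	 */
	u32 roles = FC_PORT_ROLE_NVME_INITIATOR;	/* pure initiator, 0x10 */

	if (roles & FC_PORT_ROLE_NVME_TARGET)	/* 0x10 & 0x11 != 0: wrongly true */
		handle_target_port();		/* hypothetical helper */

With the corrected values (0x10, 0x20, 0x40) each role occupies its own bit, so roles can be combined and tested independently.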
Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/nvme-fc-driver.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 0db37158a61d..12e344b5b77f 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -27,8 +27,8 @@ /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ #define FC_PORT_ROLE_NVME_INITIATOR 0x10 -#define FC_PORT_ROLE_NVME_TARGET 0x11 -#define FC_PORT_ROLE_NVME_DISCOVERY 0x12 +#define FC_PORT_ROLE_NVME_TARGET 0x20 +#define FC_PORT_ROLE_NVME_DISCOVERY 0x40 /** -- cgit v1.2.3 From 4b8ba5fa525bc8bdaaed2a5c5433f0f2008d7bc5 Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 25 Apr 2017 16:23:09 -0700 Subject: nvmet-fc: remove target cpu scheduling flag Remove NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED. It's unnecessary. Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/target/fc.c | 4 +--- drivers/nvme/target/fcloop.c | 1 - drivers/scsi/lpfc/lpfc_nvmet.c | 1 - include/linux/nvme-fc-driver.h | 12 ++---------- 4 files changed, 3 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 62eba29c85fb..2006fae61980 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -517,9 +517,7 @@ nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid) { int cpu, idx, cnt; - if (!(tgtport->ops->target_features & - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) || - tgtport->ops->max_hw_queues == 1) + if (tgtport->ops->max_hw_queues == 1) return WORK_CPU_UNBOUND; /* Simple cpu selection based on qid modulo active cpu count */ diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 15551ef79c8c..294a6611fb24 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -698,7 +698,6 @@ static struct nvmet_fc_target_template tgttemplate = { .dma_boundary = FCLOOP_DMABOUND_4G, /* optional features */ .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | NVMET_FCTGTFEAT_OPDONE_IN_ISR, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct fcloop_tport), diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 94434e621c33..0488580eea12 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -764,7 +764,6 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | NVMET_FCTGTFEAT_CMD_IN_ISR | NVMET_FCTGTFEAT_OPDONE_IN_ISR; diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 12e344b5b77f..6c8c5d8041b7 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -642,15 +642,7 @@ enum { * sequence in one LLDD operation. Errors during Data * sequence transmit must not allow RSP sequence to be sent. */ - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), - /* Bit 1: When 0, the LLDD will deliver FCP CMD - * on the CPU it should be affinitized to. Thus work will - * be scheduled on the cpu received on. 
When 1, the LLDD - * may not deliver the CMD on the CPU it should be worked - * on. The transport should pick a cpu to schedule the work - * on. - */ - NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2), + NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1), /* Bit 2: When 0, the LLDD is calling the cmd rcv handler * in a non-isr context, allowing the transport to finish * op completion in the calling context. When 1, the LLDD @@ -658,7 +650,7 @@ enum { * requiring the transport to transition to a workqueue * for op completion. */ - NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3), + NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2), /* Bit 3: When 0, the LLDD is calling the op done handler * in a non-isr context, allowing the transport to finish * op completion in the calling context. When 1, the LLDD -- cgit v1.2.3 From b8210a9e4bea6354eccc5d8a50ecc21ea7486dc9 Mon Sep 17 00:00:00 2001 From: Miroslav Lichvar Date: Fri, 19 May 2017 17:52:35 +0200 Subject: net: define receive timestamp filter for NTP Add HWTSTAMP_FILTER_NTP_ALL to the hwtstamp_rx_filters enum for timestamping of NTP packets. There is currently only one driver (phyter) that could support it directly. CC: Richard Cochran CC: Willem de Bruijn Signed-off-by: Miroslav Lichvar Signed-off-by: David S. Miller --- include/uapi/linux/net_tstamp.h | 3 +++ net/core/dev_ioctl.c | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 464dcca5ed68..0749fb13e517 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -125,6 +125,9 @@ enum hwtstamp_rx_filters { HWTSTAMP_FILTER_PTP_V2_SYNC, /* PTP v2/802.AS1, any layer, Delay_req packet */ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ, + + /* NTP, UDP, all versions and packet modes */ + HWTSTAMP_FILTER_NTP_ALL, }; #endif /* _NET_TIMESTAMPING_H */ diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b94b1d293506..8f036a76b92e 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -227,6 +227,8 @@ static int net_hwtstamp_validate(struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: rx_filter_valid = 1; break; + case HWTSTAMP_FILTER_NTP_ALL: + break; } if (!tx_type_valid || !rx_filter_valid) -- cgit v1.2.3 From 90b602f80397657429373ca009f98aec4dd3c553 Mon Sep 17 00:00:00 2001 From: Miroslav Lichvar Date: Fri, 19 May 2017 17:52:37 +0200 Subject: net: add function to retrieve original skb device using NAPI ID Since commit b68581778cd0 ("net: Make skb->skb_iif always track skb->dev") skbs don't have the original index of the interface which received the packet. This information is now needed for a new control message related to hardware timestamping. Instead of adding a new field to skb, we can find the device by the NAPI ID if it is available, i.e. CONFIG_NET_RX_BUSY_POLL is enabled and the driver is using NAPI. Add dev_get_by_napi_id() and also skb_napi_id() to hide the CONFIG_NET_RX_BUSY_POLL ifdef. CC: Richard Cochran Suggested-by: Willem de Bruijn Acked-by: Willem de Bruijn Signed-off-by: Miroslav Lichvar Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 1 + include/linux/skbuff.h | 9 +++++++++ net/core/dev.c | 26 ++++++++++++++++++++++++++ 3 files changed, 36 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f8f7cd52a0a0..c50c9218e31e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2456,6 +2456,7 @@ static inline int dev_recursion_level(void) struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1713e4b7ea9f..8acce7143f6a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -858,6 +858,15 @@ static inline bool skb_pkt_type_ok(u32 ptype) return ptype <= PACKET_OTHERHOST; } +static inline unsigned int skb_napi_id(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + return skb->napi_id; +#else + return 0; +#endif +} + void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_tx_error(struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index bb136f726890..3d98fbf4cbb0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -163,6 +163,7 @@ static int netif_rx_internal(struct sk_buff *skb); static int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, struct netdev_notifier_info *info); +static struct napi_struct *napi_by_id(unsigned int napi_id); /* * The @dev_base_head list is protected by @dev_base_lock and the rtnl @@ -866,6 +867,31 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex) } EXPORT_SYMBOL(dev_get_by_index); +/** + * dev_get_by_napi_id - find a device by napi_id + * @napi_id: ID of the NAPI struct + * + * Search for an interface by NAPI ID. Returns %NULL if the device + * is not found or a pointer to the device. The device has not had + * its reference counter increased so the caller must be careful + * about locking. The caller must hold RCU lock. + */ + +struct net_device *dev_get_by_napi_id(unsigned int napi_id) +{ + struct napi_struct *napi; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (napi_id < MIN_NAPI_ID) + return NULL; + + napi = napi_by_id(napi_id); + + return napi ? napi->dev : NULL; +} +EXPORT_SYMBOL(dev_get_by_napi_id); + /** * netdev_get_name - get a netdevice name, knowing its ifindex. * @net: network namespace -- cgit v1.2.3 From aad9c8c470f2a8321a99eb053630ce0e199558d6 Mon Sep 17 00:00:00 2001 From: Miroslav Lichvar Date: Fri, 19 May 2017 17:52:38 +0200 Subject: net: add new control message for incoming HW-timestamped packets Add SOF_TIMESTAMPING_OPT_PKTINFO option to request a new control message for incoming packets with hardware timestamps. It contains the index of the real interface which received the packet and the length of the packet at layer 2. The index is useful with bonding, bridges and other interfaces, where IP_PKTINFO doesn't allow applications to determine which PHC made the timestamp. With the L2 length (and link speed) it is possible to transpose preamble timestamps to trailer timestamps, which are used in the NTP protocol. 
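For example, a receiver could request and parse the new control message roughly as follows (an illustrative userspace sketch, not part of the patch; fd and a msghdr prepared with msg_control space are assumed, and error handling is omitted):

	int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE |
		    SOF_TIMESTAMPING_OPT_PKTINFO;
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
	recvmsg(fd, &msg, 0);
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING_PKTINFO) {
			struct scm_ts_pktinfo *pi = (void *)CMSG_DATA(cmsg);
			/* pi->if_index: real receiving interface (zero if
			 * unavailable), pi->pkt_length: length at layer 2 */
		}
	}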
While this information could be provided by two new socket options independently from timestamping, it doesn't look like they would be very useful. With this option any performance impact is limited to hardware timestamping. Use dev_get_by_napi_id() to get the device and its index. On kernels with disabled CONFIG_NET_RX_BUSY_POLL or drivers not using NAPI, a zero index will be returned in the control message. CC: Richard Cochran Acked-by: Willem de Bruijn Signed-off-by: Miroslav Lichvar Signed-off-by: David S. Miller --- Documentation/networking/timestamping.txt | 10 ++++++++++ include/uapi/asm-generic/socket.h | 2 ++ include/uapi/linux/net_tstamp.h | 11 ++++++++++- net/socket.c | 27 ++++++++++++++++++++++++++- 4 files changed, 48 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 96f50694a748..ce11e3a08c0d 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -193,6 +193,16 @@ SOF_TIMESTAMPING_OPT_STATS: the transmit timestamps, such as how long a certain block of data was limited by peer's receiver window. +SOF_TIMESTAMPING_OPT_PKTINFO: + + Enable the SCM_TIMESTAMPING_PKTINFO control message for incoming + packets with hardware timestamps. The message contains struct + scm_ts_pktinfo, which supplies the index of the real interface which + received the packet and its length at layer 2. A valid (non-zero) + interface index will be returned only if CONFIG_NET_RX_BUSY_POLL is + enabled and the driver is using NAPI. The struct contains also two + other fields, but they are reserved and undefined. + New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate regardless of the setting of sysctl net.core.tstamp_allow_data. 
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 2b488565599d..a5f6e819fafd 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -100,4 +100,6 @@ #define SO_COOKIE 57 +#define SCM_TIMESTAMPING_PKTINFO 58 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 0749fb13e517..dee74d39da94 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -9,6 +9,7 @@ #ifndef _NET_TIMESTAMPING_H #define _NET_TIMESTAMPING_H +#include #include /* for SO_TIMESTAMPING */ /* SO_TIMESTAMPING gets an integer bit field comprised of these values */ @@ -26,8 +27,9 @@ enum { SOF_TIMESTAMPING_OPT_CMSG = (1<<10), SOF_TIMESTAMPING_OPT_TSONLY = (1<<11), SOF_TIMESTAMPING_OPT_STATS = (1<<12), + SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_STATS, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_PKTINFO, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -130,4 +132,11 @@ enum hwtstamp_rx_filters { HWTSTAMP_FILTER_NTP_ALL, }; +/* SCM_TIMESTAMPING_PKTINFO control message */ +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + #endif /* _NET_TIMESTAMPING_H */ diff --git a/net/socket.c b/net/socket.c index c2564eb25c6b..67db7d8a3b81 100644 --- a/net/socket.c +++ b/net/socket.c @@ -662,6 +662,27 @@ static bool skb_is_err_queue(const struct sk_buff *skb) return skb->pkt_type == PACKET_OUTGOING; } +static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb) +{ + struct scm_ts_pktinfo ts_pktinfo; + struct net_device *orig_dev; + + if (!skb_mac_header_was_set(skb)) + return; + + memset(&ts_pktinfo, 0, sizeof(ts_pktinfo)); + + rcu_read_lock(); + orig_dev = dev_get_by_napi_id(skb_napi_id(skb)); + if (orig_dev) + ts_pktinfo.if_index = orig_dev->ifindex; + rcu_read_unlock(); + + ts_pktinfo.pkt_length = skb->len - skb_mac_offset(skb); + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_PKTINFO, + sizeof(ts_pktinfo), &ts_pktinfo); +} + /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ @@ -699,8 +720,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, empty = 0; if (shhwtstamps && (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && - ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) + ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) { empty = 0; + if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) && + !skb_is_err_queue(skb)) + put_ts_pktinfo(msg, skb); + } if (!empty) { put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(tss), &tss); -- cgit v1.2.3 From b50a5c70ffa4fd6b6da324ab54c84adf48fb17d9 Mon Sep 17 00:00:00 2001 From: Miroslav Lichvar Date: Fri, 19 May 2017 17:52:40 +0200 Subject: net: allow simultaneous SW and HW transmit timestamping Add SOF_TIMESTAMPING_OPT_TX_SWHW option to allow an outgoing packet to be looped to the socket's error queue with a software timestamp even when a hardware transmit timestamp is expected to be provided by the driver. Applications using this option will receive two separate messages from the error queue, one with a software timestamp and the other with a hardware timestamp. 
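A sender opting in would look roughly like this (an illustrative sketch under the semantics above, not part of the patch; fd, buf, len and an error-queue msghdr are assumed):

	int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE |
		    SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE |
		    SOF_TIMESTAMPING_OPT_TX_SWHW;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
	send(fd, buf, len, 0);
	/* Two error-queue messages follow: in one, scm_timestamping carries
	 * the software stamp in ts[0]; in the other, the hardware stamp in
	 * ts[2]. */
	recvmsg(fd, &msg, MSG_ERRQUEUE);
	recvmsg(fd, &msg, MSG_ERRQUEUE);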
As the hardware timestamp is saved to the shared skb info, which may happen before the first message with software timestamp is received by the application, the hardware timestamp is copied to the SCM_TIMESTAMPING control message only when the skb has no software timestamp or it is an incoming packet. While changing sw_tx_timestamp(), inline it in skb_tx_timestamp() as there are no other users. CC: Richard Cochran CC: Willem de Bruijn Signed-off-by: Miroslav Lichvar Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- Documentation/networking/timestamping.txt | 8 ++++++++ include/linux/skbuff.h | 10 ++-------- include/uapi/linux/net_tstamp.h | 3 ++- net/core/skbuff.c | 4 ++++ net/socket.c | 20 ++++++++++++++++++-- 5 files changed, 34 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 50eb0e554778..196ba17cc344 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -203,6 +203,14 @@ SOF_TIMESTAMPING_OPT_PKTINFO: enabled and the driver is using NAPI. The struct contains also two other fields, but they are reserved and undefined. +SOF_TIMESTAMPING_OPT_TX_SWHW: + + Request both hardware and software timestamps for outgoing packets + when SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE + are enabled at the same time. If both timestamps are generated, + two separate messages will be looped to the socket's error queue, + each containing just one timestamp. + New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate regardless of the setting of sysctl net.core.tstamp_allow_data. diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8acce7143f6a..45a59c1e0cc7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3259,13 +3259,6 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); -static inline void sw_tx_timestamp(struct sk_buff *skb) -{ - if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && - !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - skb_tstamp_tx(skb, NULL); -} - /** * skb_tx_timestamp() - Driver hook for transmit timestamping * @@ -3281,7 +3274,8 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) static inline void skb_tx_timestamp(struct sk_buff *skb) { skb_clone_tx_timestamp(skb); - sw_tx_timestamp(skb); + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + skb_tstamp_tx(skb, NULL); } /** diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index dee74d39da94..3d421d912193 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -28,8 +28,9 @@ enum { SOF_TIMESTAMPING_OPT_TSONLY = (1<<11), SOF_TIMESTAMPING_OPT_STATS = (1<<12), SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), + SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_PKTINFO, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index d5c98117cbce..780b7c1563d0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3901,6 +3901,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (!sk) return; + if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && + skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) + return; + tsonly = 
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; if (!skb_may_tx_timestamp(sk, tsonly)) return; diff --git a/net/socket.c b/net/socket.c index 67db7d8a3b81..cb355a7ef135 100644 --- a/net/socket.c +++ b/net/socket.c @@ -662,6 +662,19 @@ static bool skb_is_err_queue(const struct sk_buff *skb) return skb->pkt_type == PACKET_OUTGOING; } +/* On transmit, software and hardware timestamps are returned independently. + * As the two skb clones share the hardware timestamp, which may be updated + * before the software timestamp is received, a hardware TX timestamp may be + * returned only if there is no software TX timestamp. Ignore false software + * timestamps, which may be made in the __sock_recv_timestamp() call when the + * option SO_TIMESTAMP(NS) is enabled on the socket, even when the skb has a + * hardware timestamp. + */ +static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp) +{ + return skb->tstamp && !false_tstamp && skb_is_err_queue(skb); +} + static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct scm_ts_pktinfo ts_pktinfo; @@ -691,14 +704,16 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct scm_timestamping tss; - int empty = 1; + int empty = 1, false_tstamp = 0; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ - if (need_software_tstamp && skb->tstamp == 0) + if (need_software_tstamp && skb->tstamp == 0) { __net_timestamp(skb); + false_tstamp = 1; + } if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { @@ -720,6 +735,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, empty = 0; if (shhwtstamps && (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && + !skb_is_swtx_tstamp(skb, false_tstamp) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) { empty = 0; if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) && -- cgit v1.2.3 From b4d2ea2af95cb77e2f320e24da526280d4aa2f6b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 8 May 2017 10:48:21 +0200 Subject: Revert "pinctrl: generic: Add bi-directional and output-enable" This reverts commit 8c58f1a7a4b6d1d723bf25fef9d842d5a11200d0. It turns out that applying these generic properties was premature: the properties used in the driver using this are of unclear electrical nature and the subject needs to be discussed.
Signed-off-by: Linus Walleij --- Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt | 2 -- drivers/pinctrl/pinconf-generic.c | 3 --- include/linux/pinctrl/pinconf-generic.h | 3 --- 3 files changed, 8 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt index 71a3c134af1b..f01d154090da 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt @@ -247,7 +247,6 @@ bias-bus-hold - latch weakly bias-pull-up - pull up the pin bias-pull-down - pull down the pin bias-pull-pin-default - use pin-default pull state -bi-directional - pin supports simultaneous input/output operations drive-push-pull - drive actively high and low drive-open-drain - drive with open drain drive-open-source - drive with open source @@ -260,7 +259,6 @@ input-debounce - debounce mode with debound time X power-source - select between different power supplies low-power-enable - enable low power mode low-power-disable - disable low power mode -output-enable - enable output on pin regardless of output value output-low - set the pin to output mode with low level output-high - set the pin to output mode with high level slew-rate - set the slew rate diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 0d6b7f4b82af..720a19fd38d2 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = { PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, "input bias pull to pin specific state", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), - PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), @@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = { { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, - { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 }, { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, @@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = { { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, - { "output-enable", PIN_CONFIG_OUTPUT, 1, }, { "output-high", PIN_CONFIG_OUTPUT, 1, }, { "output-low", PIN_CONFIG_OUTPUT, 0, }, { "power-source", PIN_CONFIG_POWER_SOURCE, 0 }, diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 279e3c5326e3..7620eb127cff 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -42,8 +42,6 @@ * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high * impedance to VDD). If the argument is != 0 pull-up is enabled, * if it is 0, pull-up is total, i.e. the pin is connected to VDD. 
- * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous - * input and output operations. * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open * collector) which means it is usually wired with other output ports * which are then pulled up with an external resistor. Setting this @@ -98,7 +96,6 @@ enum pin_config_param { PIN_CONFIG_BIAS_PULL_DOWN, PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, PIN_CONFIG_BIAS_PULL_UP, - PIN_CONFIG_BIDIRECTIONAL, PIN_CONFIG_DRIVE_OPEN_DRAIN, PIN_CONFIG_DRIVE_OPEN_SOURCE, PIN_CONFIG_DRIVE_PUSH_PULL, -- cgit v1.2.3 From 020e0b1c8f19f1fc3bce23beeccd80c574ca0e49 Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Thu, 11 May 2017 20:24:31 +0200 Subject: gpiolib: Add stubs for gpiod lookup table interface Add stubs for gpiod_add_lookup_table() and gpiod_remove_lookup_table() for the !GPIOLIB case to prevent build errors. Signed-off-by: Anatolij Gustschin Reviewed-by: Andy Shevchenko Signed-off-by: Linus Walleij --- include/linux/gpio/machine.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index c0d712d22b07..f738d50cc17d 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -56,7 +56,14 @@ struct gpiod_lookup_table { .flags = _flags, \ } +#ifdef CONFIG_GPIOLIB void gpiod_add_lookup_table(struct gpiod_lookup_table *table); void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); +#else +static inline +void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} +static inline +void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} +#endif #endif /* __LINUX_GPIO_MACHINE_H */ -- cgit v1.2.3 From c2372c20425bd75a5527b3e2204059762190f6ca Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 22 May 2017 13:09:20 +0200 Subject: of/platform: Make of_platform_device_destroy globally visible of_platform_device_destroy is the counterpart to of_platform_device_create which is a non-static function. After creating a platform device it might be necessary to destroy it to deal with -EPROBE_DEFER where a repeated of_platform_device_create call would fail otherwise. Therefore also make of_platform_device_destroy globally visible.
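A minimal sketch of the intended caller pattern (illustrative, not from the patch; np, parent and setup_child_resources() are assumed):

	struct platform_device *pdev;
	int ret;

	pdev = of_platform_device_create(np, NULL, parent);
	if (!pdev)
		return -ENODEV;

	ret = setup_child_resources(pdev);	/* hypothetical helper */
	if (ret == -EPROBE_DEFER) {
		/* undo creation so a deferred re-probe can recreate it */
		of_platform_device_destroy(&pdev->dev, NULL);
		return ret;
	}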
Signed-off-by: Jan Glauber Acked-by: Rob Herring Signed-off-by: Ulf Hansson --- drivers/of/platform.c | 3 ++- include/linux/of_platform.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 71fecc2debfc..703a42118ffc 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void) arch_initcall_sync(of_platform_default_populate_init); #endif -static int of_platform_device_destroy(struct device *dev, void *data) +int of_platform_device_destroy(struct device *dev, void *data) { /* Do not touch devices not populated from the device tree */ if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) @@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data) of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; } +EXPORT_SYMBOL_GPL(of_platform_device_destroy); /** * of_platform_depopulate() - Remove devices populated from device tree diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index dc8224ae28d5..e0d1946270f3 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent); +extern int of_platform_device_destroy(struct device *dev, void *data); extern int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent); -- cgit v1.2.3 From 6d8422a175ccf2846d9460ed2b6228fe0b12c243 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 21 May 2017 10:12:02 -0600 Subject: net: ipv4: Plumb extack through route add functions Plumb extack argument down to route add functions. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 3 ++- net/ipv4/fib_frontend.c | 16 +++++++++------- net/ipv4/fib_lookup.h | 3 ++- net/ipv4/fib_semantics.c | 22 +++++++++++++--------- net/ipv4/fib_trie.c | 4 ++-- 5 files changed, 28 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 6692c5758b33..42e8b8f55f7c 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -263,7 +263,8 @@ struct fib_table { int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags); -int fib_table_insert(struct net *, struct fib_table *, struct fib_config *); +int fib_table_insert(struct net *, struct fib_table *, struct fib_config *, + struct netlink_ext_ack *extack); int fib_table_delete(struct net *, struct fib_table *, struct fib_config *); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 83e3ed258467..511edff76c01 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -594,7 +594,8 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg) } else { tb = fib_new_table(net, cfg.fc_table); if (tb) - err = fib_table_insert(net, tb, &cfg); + err = fib_table_insert(net, tb, + &cfg, NULL); else err = -ENOBUFS; } @@ -626,14 +627,15 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = { }; static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, - struct nlmsghdr *nlh, struct fib_config *cfg) + struct nlmsghdr *nlh, struct fib_config *cfg, + struct netlink_ext_ack *extack) { struct nlattr *attr; int err, remaining; struct rtmsg *rtm; err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy, - NULL); + extack); if (err < 0) goto errout; @@ -718,7 +720,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct fib_table *tb; int err; - err = rtm_to_fib_config(net, skb, nlh, &cfg); + err = rtm_to_fib_config(net, skb, nlh, &cfg, extack); if (err < 0) goto errout; @@ -741,7 +743,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct fib_table *tb; int err; - err = rtm_to_fib_config(net, skb, nlh, &cfg); + err = rtm_to_fib_config(net, skb, nlh, &cfg, extack); if (err < 0) goto errout; @@ -751,7 +753,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } - err = fib_table_insert(net, tb, &cfg); + err = fib_table_insert(net, tb, &cfg, extack); errout: return err; } @@ -845,7 +847,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad cfg.fc_scope = RT_SCOPE_HOST; if (cmd == RTM_NEWROUTE) - fib_table_insert(net, tb, &cfg); + fib_table_insert(net, tb, &cfg, NULL); else fib_table_delete(net, tb, &cfg); } diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index 9c02920725db..2704e08545da 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -28,7 +28,8 @@ static inline void fib_alias_accessed(struct fib_alias *fa) /* Exported by fib_semantics.c */ void fib_release_info(struct fib_info *); -struct fib_info *fib_create_info(struct fib_config *cfg); +struct fib_info *fib_create_info(struct fib_config *cfg, + struct netlink_ext_ack *extack); int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 
da449ddb8cc1..8587d1b55b53 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -454,7 +454,8 @@ static int fib_detect_death(struct fib_info *fi, int order, #ifdef CONFIG_IP_ROUTE_MULTIPATH -static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining) +static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining, + struct netlink_ext_ack *extack) { int nhs = 0; @@ -468,7 +469,8 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining) } static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, - int remaining, struct fib_config *cfg) + int remaining, struct fib_config *cfg, + struct netlink_ext_ack *extack) { int ret; @@ -714,7 +716,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) * |-> {local prefix} (terminal node) */ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, - struct fib_nh *nh) + struct fib_nh *nh, struct netlink_ext_ack *extack) { int err = 0; struct net *net; @@ -797,7 +799,6 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) return -EINVAL; - rcu_read_lock(); err = -ENODEV; in_dev = inetdev_by_index(net, nh->nh_oif); @@ -980,7 +981,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) return 0; } -struct fib_info *fib_create_info(struct fib_config *cfg) +struct fib_info *fib_create_info(struct fib_config *cfg, + struct netlink_ext_ack *extack) { int err; struct fib_info *fi = NULL; @@ -1000,7 +1002,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) #ifdef CONFIG_IP_ROUTE_MULTIPATH if (cfg->fc_mp) { - nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len); + nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack); if (nhs == 0) goto err_inval; } @@ -1062,7 +1064,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) if (cfg->fc_mp) { #ifdef CONFIG_IP_ROUTE_MULTIPATH - err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg); + err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack); if (err != 0) goto failure; if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) @@ -1129,7 +1131,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg) struct fib_nh *nh = fi->fib_nh; /* Local address is added. */ - if (nhs != 1 || nh->nh_gw) + if (nhs != 1) + goto err_inval; + if (nh->nh_gw) goto err_inval; nh->nh_scope = RT_SCOPE_NOWHERE; nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); @@ -1140,7 +1144,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) int linkdown = 0; change_nexthops(fi) { - err = fib_check_nh(cfg, fi, nexthop_nh); + err = fib_check_nh(cfg, fi, nexthop_nh, extack); if (err != 0) goto failure; if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 51182ff2b441..6d0f6c79d9aa 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1101,7 +1101,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, /* Caller must hold RTNL. 
*/ int fib_table_insert(struct net *net, struct fib_table *tb, - struct fib_config *cfg) + struct fib_config *cfg, struct netlink_ext_ack *extack) { enum fib_event_type event = FIB_EVENT_ENTRY_ADD; struct trie *t = (struct trie *)tb->tb_data; @@ -1125,7 +1125,7 @@ int fib_table_insert(struct net *net, struct fib_table *tb, if ((plen < KEYLENGTH) && (key << plen)) return -EINVAL; - fi = fib_create_info(cfg); + fi = fib_create_info(cfg, extack); if (IS_ERR(fi)) { err = PTR_ERR(fi); goto err; -- cgit v1.2.3 From c3ab2b4ec8f7c0700bf10957171c479bf3dbca52 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 21 May 2017 10:12:03 -0600 Subject: net: ipv4: Add extack messages for route add failures Add messages for non-obvious errors (e.g., no need to add text for malloc failures or ENODEV failures). This mostly covers the annoying EINVAL errors. Some message strings violate the 80-column limit, but searchable strings need to trump that rule. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/linux/netlink.h | 5 +++ net/ipv4/fib_frontend.c | 2 + net/ipv4/fib_semantics.c | 115 ++++++++++++++++++++++++++++++++++++++--------- 3 files changed, 100 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 5fff5ba5964e..a68aad484c69 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -97,6 +97,11 @@ struct netlink_ext_ack { #define NL_SET_ERR_MSG_MOD(extack, msg) \ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) +#define NL_SET_BAD_ATTR(extack, attr) do { \ + if ((extack)) \ + (extack)->bad_attr = (attr); \ +} while (0) + extern void netlink_kernel_release(struct sock *sk); extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 511edff76c01..14d2f7bd7c76 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -656,6 +656,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, cfg->fc_nlinfo.nl_net = net; if (cfg->fc_type > RTN_MAX) { + NL_SET_ERR_MSG(extack, "Invalid route type"); err = -EINVAL; goto errout; } @@ -726,6 +727,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, tb = fib_get_table(net, cfg.fc_table); if (!tb) { + NL_SET_ERR_MSG(extack, "FIB table does not exist"); err = -ESRCH; goto errout; } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 8587d1b55b53..4852e183afe0 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -465,7 +466,13 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining, } /* leftover implies invalid nexthop configuration, discard it */ - return remaining > 0 ?
0 : nhs; + if (remaining > 0) { + NL_SET_ERR_MSG(extack, + "Invalid nexthop configuration - extra data after nexthops"); + nhs = 0; + } + + return nhs; } static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, @@ -477,11 +484,17 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, change_nexthops(fi) { int attrlen; - if (!rtnh_ok(rtnh, remaining)) + if (!rtnh_ok(rtnh, remaining)) { + NL_SET_ERR_MSG(extack, + "Invalid nexthop configuration - extra data after nexthop"); return -EINVAL; + } - if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) + if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) { + NL_SET_ERR_MSG(extack, + "Invalid flags for nexthop - can not contain DEAD or LINKDOWN"); return -EINVAL; + } nexthop_nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; @@ -507,8 +520,12 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); - if (!nla_entype) + if (!nla_entype) { + NL_SET_BAD_ATTR(extack, nla); + NL_SET_ERR_MSG(extack, + "Encap type is missing"); goto err_inval; + } ret = lwtunnel_build_state(nla_get_u16( nla_entype), @@ -729,16 +746,25 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, if (nh->nh_flags & RTNH_F_ONLINK) { unsigned int addr_type; - if (cfg->fc_scope >= RT_SCOPE_LINK) + if (cfg->fc_scope >= RT_SCOPE_LINK) { + NL_SET_ERR_MSG(extack, + "Nexthop has invalid scope"); return -EINVAL; + } dev = __dev_get_by_index(net, nh->nh_oif); if (!dev) return -ENODEV; - if (!(dev->flags & IFF_UP)) + if (!(dev->flags & IFF_UP)) { + NL_SET_ERR_MSG(extack, + "Nexthop device is not up"); return -ENETDOWN; + } addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw); - if (addr_type != RTN_UNICAST) + if (addr_type != RTN_UNICAST) { + NL_SET_ERR_MSG(extack, + "Nexthop has invalid gateway"); return -EINVAL; + } if (!netif_carrier_ok(dev)) nh->nh_flags |= RTNH_F_LINKDOWN; nh->nh_dev = dev; @@ -778,18 +804,25 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, } if (err) { + NL_SET_ERR_MSG(extack, + "Nexthop has invalid gateway"); rcu_read_unlock(); return err; } } err = -EINVAL; - if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) + if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) { + NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway"); goto out; + } nh->nh_scope = res.scope; nh->nh_oif = FIB_RES_OIF(res); nh->nh_dev = dev = FIB_RES_DEV(res); - if (!dev) + if (!dev) { + NL_SET_ERR_MSG(extack, + "No egress device for nexthop gateway"); goto out; + } dev_hold(dev); if (!netif_carrier_ok(dev)) nh->nh_flags |= RTNH_F_LINKDOWN; @@ -797,16 +830,21 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, } else { struct in_device *in_dev; - if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) + if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) { + NL_SET_ERR_MSG(extack, + "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set"); return -EINVAL; + } rcu_read_lock(); err = -ENODEV; in_dev = inetdev_by_index(net, nh->nh_oif); if (!in_dev) goto out; err = -ENETDOWN; - if (!(in_dev->dev->flags & IFF_UP)) + if (!(in_dev->dev->flags & IFF_UP)) { + NL_SET_ERR_MSG(extack, "Device for nexthop is not up"); goto out; + } nh->nh_dev = in_dev->dev; dev_hold(nh->nh_dev); nh->nh_scope = RT_SCOPE_HOST; @@ -994,11 +1032,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg, goto err_inval; /* Fast check to catch the most weird cases */ - if (fib_props[cfg->fc_type].scope > cfg->fc_scope) + if 
(fib_props[cfg->fc_type].scope > cfg->fc_scope) { + NL_SET_ERR_MSG(extack, "Invalid scope"); goto err_inval; + } - if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) + if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) { + NL_SET_ERR_MSG(extack, + "Invalid rtm_flags - can not contain DEAD or LINKDOWN"); goto err_inval; + } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (cfg->fc_mp) { @@ -1067,15 +1110,26 @@ struct fib_info *fib_create_info(struct fib_config *cfg, err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack); if (err != 0) goto failure; - if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) + if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) { + NL_SET_ERR_MSG(extack, + "Nexthop device index does not match RTA_OIF"); goto err_inval; - if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) + } + if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) { + NL_SET_ERR_MSG(extack, + "Nexthop gateway does not match RTA_GATEWAY"); goto err_inval; + } #ifdef CONFIG_IP_ROUTE_CLASSID - if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) + if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) { + NL_SET_ERR_MSG(extack, + "Nexthop class id does not match RTA_FLOW"); goto err_inval; + } #endif #else + NL_SET_ERR_MSG(extack, + "Multipath support not enabled in kernel"); goto err_inval; #endif } else { @@ -1084,8 +1138,11 @@ struct fib_info *fib_create_info(struct fib_config *cfg, if (cfg->fc_encap) { struct lwtunnel_state *lwtstate; - if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) + if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) { + NL_SET_ERR_MSG(extack, + "LWT encap type not specified"); goto err_inval; + } err = lwtunnel_build_state(cfg->fc_encap_type, cfg->fc_encap, AF_INET, cfg, &lwtstate); @@ -1108,8 +1165,11 @@ struct fib_info *fib_create_info(struct fib_config *cfg, } if (fib_props[cfg->fc_type].error) { - if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) + if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) { + NL_SET_ERR_MSG(extack, + "Gateway, device and multipath can not be specified for this route type"); goto err_inval; + } goto link_it; } else { switch (cfg->fc_type) { @@ -1120,21 +1180,30 @@ struct fib_info *fib_create_info(struct fib_config *cfg, case RTN_MULTICAST: break; default: + NL_SET_ERR_MSG(extack, "Invalid route type"); goto err_inval; } } - if (cfg->fc_scope > RT_SCOPE_HOST) + if (cfg->fc_scope > RT_SCOPE_HOST) { + NL_SET_ERR_MSG(extack, "Invalid scope"); goto err_inval; + } if (cfg->fc_scope == RT_SCOPE_HOST) { struct fib_nh *nh = fi->fib_nh; /* Local address is added. 
*/ - if (nhs != 1) + if (nhs != 1) { + NL_SET_ERR_MSG(extack, + "Route with host scope can not have multiple nexthops"); goto err_inval; - if (nh->nh_gw) + } + if (nh->nh_gw) { + NL_SET_ERR_MSG(extack, + "Route with host scope can not have a gateway"); goto err_inval; + } nh->nh_scope = RT_SCOPE_NOWHERE; nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); err = -ENODEV; @@ -1154,8 +1223,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg, fi->fib_flags |= RTNH_F_LINKDOWN; } - if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) + if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) { + NL_SET_ERR_MSG(extack, "Invalid prefsrc address"); goto err_inval; + } change_nexthops(fi) { fib_info_update_nh_saddr(net, nexthop_nh); -- cgit v1.2.3 From 333c430167c21b96de81a674fa6cbe84b09475dc Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 21 May 2017 10:12:04 -0600 Subject: net: ipv6: Plumb extack through route add functions Plumb extack argument down to route add functions. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 3 ++- include/net/ip6_route.h | 2 +- net/ipv6/addrconf.c | 4 ++-- net/ipv6/ip6_fib.c | 14 +++++++----- net/ipv6/route.c | 57 +++++++++++++++++++++++++++---------------------- 5 files changed, 46 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c979c878df1c..aa50e2e6fa2a 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -277,7 +277,8 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), void *arg); int fib6_add(struct fib6_node *root, struct rt6_info *rt, - struct nl_info *info, struct mx6_config *mxc); + struct nl_info *info, struct mx6_config *mxc, + struct netlink_ext_ack *extack); int fib6_del(struct rt6_info *rt, struct nl_info *info); void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f5e625f53367..f3da9dd2a8db 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -90,7 +90,7 @@ void ip6_route_cleanup(void); int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg); -int ip6_route_add(struct fib6_config *cfg); +int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack); int ip6_ins_rt(struct rt6_info *); int ip6_del_rt(struct rt6_info *); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6a4fb1e629fb..25443fd946a8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2280,7 +2280,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, cfg.fc_flags |= RTF_NONEXTHOP; #endif - ip6_route_add(&cfg); + ip6_route_add(&cfg, NULL); } @@ -2335,7 +2335,7 @@ static void addrconf_add_mroute(struct net_device *dev) ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); - ip6_route_add(&cfg); + ip6_route_add(&cfg, NULL); } static struct inet6_dev *addrconf_add_dev(struct net_device *dev) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index d4bf2c68a545..c1197e167d3e 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -473,7 +473,8 @@ out: static struct fib6_node *fib6_add_1(struct fib6_node *root, struct in6_addr *addr, int plen, int offset, int allow_create, - int replace_required, int sernum) + int replace_required, int sernum, + struct netlink_ext_ack *extack) { struct fib6_node *fn, *in, *ln; struct fib6_node *pn = NULL; @@ -964,7 +965,8 @@ void fib6_force_start_gc(struct net *net) */ int 
fib6_add(struct fib6_node *root, struct rt6_info *rt, - struct nl_info *info, struct mx6_config *mxc) + struct nl_info *info, struct mx6_config *mxc, + struct netlink_ext_ack *extack) { struct fib6_node *fn, *pn = NULL; int err = -ENOMEM; @@ -987,7 +989,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), allow_create, - replace_required, sernum); + replace_required, sernum, extack); if (IS_ERR(fn)) { err = PTR_ERR(fn); fn = NULL; @@ -1028,7 +1030,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, sn = fib6_add_1(sfn, &rt->rt6i_src.addr, rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), - allow_create, replace_required, sernum); + allow_create, replace_required, sernum, + extack); if (IS_ERR(sn)) { /* If it is failed, discard just allocated @@ -1047,7 +1050,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), - allow_create, replace_required, sernum); + allow_create, replace_required, sernum, + extack); if (IS_ERR(sn)) { err = PTR_ERR(sn); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index dc61b0b5e64e..ca754ec4054a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -938,14 +938,15 @@ EXPORT_SYMBOL(rt6_lookup); */ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, - struct mx6_config *mxc) + struct mx6_config *mxc, + struct netlink_ext_ack *extack) { int err; struct fib6_table *table; table = rt->rt6i_table; write_lock_bh(&table->tb6_lock); - err = fib6_add(&table->tb6_root, rt, info, mxc); + err = fib6_add(&table->tb6_root, rt, info, mxc, extack); write_unlock_bh(&table->tb6_lock); return err; @@ -956,7 +957,7 @@ int ip6_ins_rt(struct rt6_info *rt) struct nl_info info = { .nl_net = dev_net(rt->dst.dev), }; struct mx6_config mxc = { .mx = NULL, }; - return __ip6_ins_rt(rt, &info, &mxc); + return __ip6_ins_rt(rt, &info, &mxc, NULL); } static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, @@ -1844,7 +1845,8 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net, return rt; } -static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) +static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct net *net = cfg->fc_nlinfo.nl_net; struct rt6_info *rt = NULL; @@ -2111,13 +2113,14 @@ out: return ERR_PTR(err); } -int ip6_route_add(struct fib6_config *cfg) +int ip6_route_add(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct mx6_config mxc = { .mx = NULL, }; struct rt6_info *rt; int err; - rt = ip6_route_info_create(cfg); + rt = ip6_route_info_create(cfg, extack); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; @@ -2128,7 +2131,7 @@ int ip6_route_add(struct fib6_config *cfg) if (err) goto out; - err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc); + err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack); kfree(mxc.mx); @@ -2222,7 +2225,8 @@ out_put: return err; } -static int ip6_route_del(struct fib6_config *cfg) +static int ip6_route_del(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct fib6_table *table; struct fib6_node *fn; @@ -2483,7 +2487,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net, if (!prefixlen) cfg.fc_flags |= RTF_DEFAULT; - ip6_route_add(&cfg); + ip6_route_add(&cfg, NULL); return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); } @@ -2529,7 +2533,7 @@ struct rt6_info 
*rt6_add_dflt_router(const struct in6_addr *gwaddr, cfg.fc_gateway = *gwaddr; - if (!ip6_route_add(&cfg)) { + if (!ip6_route_add(&cfg, NULL)) { struct fib6_table *table; table = fib6_get_table(dev_net(dev), cfg.fc_table); @@ -2622,10 +2626,10 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) rtnl_lock(); switch (cmd) { case SIOCADDRT: - err = ip6_route_add(&cfg); + err = ip6_route_add(&cfg, NULL); break; case SIOCDELRT: - err = ip6_route_del(&cfg); + err = ip6_route_del(&cfg, NULL); break; default: err = -EINVAL; @@ -2903,7 +2907,8 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { }; static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, - struct fib6_config *cfg) + struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; @@ -3097,7 +3102,8 @@ static void ip6_route_mpath_notify(struct rt6_info *rt, inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); } -static int ip6_route_multipath_add(struct fib6_config *cfg) +static int ip6_route_multipath_add(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct rt6_info *rt_notif = NULL, *rt_last = NULL; struct nl_info *info = &cfg->fc_nlinfo; @@ -3145,7 +3151,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) r_cfg.fc_encap_type = nla_get_u16(nla); } - rt = ip6_route_info_create(&r_cfg); + rt = ip6_route_info_create(&r_cfg, extack); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; @@ -3170,7 +3176,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { rt_last = nh->rt6_info; - err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc); + err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack); /* save reference to first route for notification */ if (!rt_notif && !err) rt_notif = nh->rt6_info; @@ -3212,7 +3218,7 @@ add_errout: list_for_each_entry(nh, &rt6_nh_list, next) { if (err_nh == nh) break; - ip6_route_del(&nh->r_cfg); + ip6_route_del(&nh->r_cfg, extack); } cleanup: @@ -3227,7 +3233,8 @@ cleanup: return err; } -static int ip6_route_multipath_del(struct fib6_config *cfg) +static int ip6_route_multipath_del(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct fib6_config r_cfg; struct rtnexthop *rtnh; @@ -3254,7 +3261,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg) r_cfg.fc_flags |= RTF_GATEWAY; } } - err = ip6_route_del(&r_cfg); + err = ip6_route_del(&r_cfg, extack); if (err) last_err = err; @@ -3270,15 +3277,15 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct fib6_config cfg; int err; - err = rtm_to_fib6_config(skb, nlh, &cfg); + err = rtm_to_fib6_config(skb, nlh, &cfg, extack); if (err < 0) return err; if (cfg.fc_mp) - return ip6_route_multipath_del(&cfg); + return ip6_route_multipath_del(&cfg, extack); else { cfg.fc_delete_all_nh = 1; - return ip6_route_del(&cfg); + return ip6_route_del(&cfg, extack); } } @@ -3288,14 +3295,14 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct fib6_config cfg; int err; - err = rtm_to_fib6_config(skb, nlh, &cfg); + err = rtm_to_fib6_config(skb, nlh, &cfg, extack); if (err < 0) return err; if (cfg.fc_mp) - return ip6_route_multipath_add(&cfg); + return ip6_route_multipath_add(&cfg, extack); else - return ip6_route_add(&cfg); + return ip6_route_add(&cfg, extack); } static size_t rt6_nlmsg_size(struct rt6_info *rt) -- cgit v1.2.3 From 7254a50a5db40ca6739ddf37e0a45e6912532b2c Mon Sep 17 00:00:00 2001 From: Ming Lei Date: 
Mon, 22 May 2017 23:05:05 +0800 Subject: blk-mq: remove blk_mq_abort_requeue_list() No one uses it any more, so remove it. Reviewed-by: Keith Busch Reviewed-by: Johannes Thumshirn Signed-off-by: Ming Lei Signed-off-by: Christoph Hellwig --- block/blk-mq.c | 19 ------------------- include/linux/blk-mq.h | 1 - 2 files changed, 20 deletions(-) (limited to 'include') diff --git a/block/blk-mq.c b/block/blk-mq.c index a69ad122ed66..f2224ffd225d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -628,25 +628,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); -void blk_mq_abort_requeue_list(struct request_queue *q) -{ - unsigned long flags; - LIST_HEAD(rq_list); - - spin_lock_irqsave(&q->requeue_lock, flags); - list_splice_init(&q->requeue_list, &rq_list); - spin_unlock_irqrestore(&q->requeue_lock, flags); - - while (!list_empty(&rq_list)) { - struct request *rq; - - rq = list_first_entry(&rq_list, struct request, queuelist); - list_del_init(&rq->queuelist); - blk_mq_end_request(rq, -EIO); - } -} -EXPORT_SYMBOL(blk_mq_abort_requeue_list); - struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { if (tag < tags->nr_tags) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c47aa248c640..fcd641032f8d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); bool blk_mq_queue_stopped(struct request_queue *q); -- cgit v1.2.3 From 52c96f9d7003c74c7fbec7438c0ed78df0cc1c79 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 19 May 2017 17:00:51 -0400 Subject: net: dsa: move notifier info to private header The DSA notifier events and info structure definitions are not meant for DSA drivers and users, but only used internally by the DSA core files. Move them from the public net/dsa.h file to the private dsa_priv.h file. Also use this opportunity to turn the events into an anonymous enum, because we don't care about the values, and this will prevent future conflicts when adding (and sorting) new events. Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 10 ---------- net/dsa/dsa_priv.h | 12 ++++++++++++ 2 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 791fed62fb16..c0e567c0c824 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -285,16 +285,6 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) return ds->rtable[dst->cpu_dp->ds->index]; } -#define DSA_NOTIFIER_BRIDGE_JOIN 1 -#define DSA_NOTIFIER_BRIDGE_LEAVE 2 - -/* DSA_NOTIFIER_BRIDGE_* */ -struct dsa_notifier_bridge_info { - struct net_device *br; - int sw_index; - int port; -}; - struct dsa_switch_ops { /* * Legacy probing. 
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 16021a891095..c19241eb094b 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -16,6 +16,18 @@ #include #include +enum { + DSA_NOTIFIER_BRIDGE_JOIN, + DSA_NOTIFIER_BRIDGE_LEAVE, +}; + +/* DSA_NOTIFIER_BRIDGE_* */ +struct dsa_notifier_bridge_info { + struct net_device *br; + int sw_index; + int port; +}; + struct dsa_device_ops { struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, -- cgit v1.2.3 From c61872c9833d17d3807fb999096917c1f9eaada0 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 17 May 2017 13:25:12 +0300 Subject: firmware: dmi: Add DMI_PRODUCT_FAMILY identification string Sometimes it is more convenient to be able to match a whole family of products, as in the case of a bunch of Chromebooks based on Intel_Strago, to apply a driver quirk instead of quirking each machine one by one. This adds support for the DMI_PRODUCT_FAMILY identification string and also exports it to userspace through a sysfs attribute, just like the existing ones. Suggested-by: Dmitry Torokhov Signed-off-by: Mika Westerberg Reviewed-by: Andy Shevchenko Signed-off-by: Linus Walleij --- drivers/firmware/dmi-id.c | 2 ++ drivers/firmware/dmi_scan.c | 1 + include/linux/mod_devicetable.h | 1 + 3 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 44c01390d035..dc269cb288c2 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME); DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); +DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); @@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void) ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); + ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 54be60ead08f..93f7acdaac7a 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy) dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); + dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26); break; case 2: /* Base Board Information */ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 566fda587fcf..3f74ef2281e8 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -467,6 +467,7 @@ enum dmi_field { DMI_PRODUCT_VERSION, DMI_PRODUCT_SERIAL, DMI_PRODUCT_UUID, + DMI_PRODUCT_FAMILY, DMI_BOARD_VENDOR, DMI_BOARD_NAME, DMI_BOARD_VERSION, -- cgit v1.2.3 From c70d9d809fdeecedb96972457ee45c49a232d97f Mon Sep 17 00:00:00 2001 From: "Eric W.
Biederman" Date: Mon, 22 May 2017 15:40:12 -0500 Subject: ptrace: Properly initialize ptracer_cred on fork When I introduced ptracer_cred I failed to consider the weirdness of fork where the task_struct copies the old value by default. This winds up leaving ptracer_cred set even when a process forks and the child process does not wind up being ptraced. Because ptracer_cred is not set on non-ptraced processes whose parents were ptraced this has broken the ability of the enlightenment window manager to start setuid children. Fix this by properly initializing ptracer_cred in ptrace_init_task This must be done with a little bit of care to preserve the current value of ptracer_cred when ptrace carries through fork. Re-reading the ptracer_cred from the ptracing process at this point is inconsistent with how PT_PTRACE_CAP has been maintained all of these years. Tested-by: Takashi Iwai Fixes: 64b875f7ac8a ("ptrace: Capture the ptracer's creds not PT_PTRACE_CAP") Signed-off-by: "Eric W. Biederman" --- include/linux/ptrace.h | 7 +++++-- kernel/ptrace.c | 20 +++++++++++++------- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 422bc2e4cb6a..ef3eb8bbfee4 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, - struct task_struct *new_parent); + struct task_struct *new_parent, + const struct cred *ptracer_cred); extern void __ptrace_unlink(struct task_struct *child); extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 @@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) if (unlikely(ptrace) && current->ptrace) { child->ptrace = current->ptrace; - __ptrace_link(child, current->parent); + __ptrace_link(child, current->parent, current->ptracer_cred); if (child->ptrace & PT_SEIZED) task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); @@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) set_tsk_thread_flag(child, TIF_SIGPENDING); } + else + child->ptracer_cred = NULL; } /** diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 266ddcc1d8bb..60f356d91060 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, } +void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, + const struct cred *ptracer_cred) +{ + BUG_ON(!list_empty(&child->ptrace_entry)); + list_add(&child->ptrace_entry, &new_parent->ptraced); + child->parent = new_parent; + child->ptracer_cred = get_cred(ptracer_cred); +} + /* * ptrace a task: make the debugger its new parent and * move it to the ptrace list. * * Must be called with the tasklist lock write-held. 
*/ -void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) +static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) { - BUG_ON(!list_empty(&child->ptrace_entry)); - list_add(&child->ptrace_entry, &new_parent->ptraced); - child->parent = new_parent; rcu_read_lock(); - child->ptracer_cred = get_cred(__task_cred(new_parent)); + __ptrace_link(child, new_parent, __task_cred(new_parent)); rcu_read_unlock(); } @@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request, flags |= PT_SEIZED; task->ptrace = flags; - __ptrace_link(task, current); + ptrace_link(task, current); /* SEIZE doesn't trap tracee on attach */ if (!seize) @@ -459,7 +465,7 @@ static int ptrace_traceme(void) */ if (!ret && !(current->real_parent->flags & PF_EXITING)) { current->ptrace = PT_PTRACED; - __ptrace_link(current, current->real_parent); + ptrace_link(current, current->real_parent); } } write_unlock_irq(&tasklist_lock); -- cgit v1.2.3 From 3aa4266405a6c2e03eb0ff12d7c573d3d903da4c Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 10 May 2017 13:48:41 +0300 Subject: net/sched: act_csum: Add accessors for offloading drivers Add the accessors for realizing if this is a csum action, and for which fields a checksum is needed. Signed-off-by: Or Gerlitz Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- include/net/tc_act/tc_csum.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h index f31fb6331a53..3248beaf16b0 100644 --- a/include/net/tc_act/tc_csum.h +++ b/include/net/tc_act/tc_csum.h @@ -3,6 +3,7 @@ #include #include +#include struct tcf_csum { struct tc_action common; @@ -11,4 +12,18 @@ struct tcf_csum { }; #define to_tcf_csum(a) ((struct tcf_csum *)a) +static inline bool is_tcf_csum(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_CSUM) + return true; +#endif + return false; +} + +static inline u32 tcf_csum_update_flags(const struct tc_action *a) +{ + return to_tcf_csum(a)->update_flags; +} + #endif /* __NET_TC_CSUM_H */ -- cgit v1.2.3 From 73dd3a4839c1d27c36d4dcc92e1ff44225ecbeb7 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Thu, 23 Feb 2017 11:19:36 +0200 Subject: net/mlx5: Avoid using pending command interface slots Currently, when a firmware command gets stuck or takes a long time to complete, the driver command times out and the command slot is freed and can be used for new commands; if the firmware then receives a new command on the old busy slot, its behavior is unexpected and this could be harmful. To fix this, when a driver command times out we return failure, but we don't free the command slot and we wait for the firmware to explicitly respond to that command. Once all the entries are busy we will stop processing new firmware commands.
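As a rough illustration of the resulting slot lifecycle, here is a toy model (an editor's sketch for this archive, not the mlx5 driver code; the slot bookkeeping is reduced to two flags and all locking is omitted):

	#include <stdbool.h>

	#define NUM_SLOTS 32

	static bool in_use[NUM_SLOTS];	/* slot owned by an outstanding command */
	static bool pending[NUM_SLOTS];	/* firmware completion still expected */

	/* Driver-side timeout: fail the caller, but keep the slot
	 * reserved until the firmware explicitly responds. */
	static int command_timeout(int slot)
	{
		pending[slot] = false;
		/* in_use[slot] intentionally stays true: no reuse yet */
		return -1;	/* i.e. -ETIMEDOUT to the caller */
	}

	/* Completion handler: only a real firmware completion
	 * (forced == false) frees the slot for reuse; the timeout
	 * and error paths deliver forced completions. */
	static void complete_command(int slot, bool forced)
	{
		pending[slot] = false;
		if (!forced)
			in_use[slot] = false;	/* slot may be reused now */
	}

Under this scheme a slot whose command timed out stays busy until the late firmware response finally arrives, and once every slot is busy no new commands are issued.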
Fixes: 9cba4ebcf374 ('net/mlx5: Fix potential deadlock in command mode change') Signed-off-by: Mohamad Haj Yahia Cc: kernel-team@fb.com Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 41 +++++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/health.c | 2 +- include/linux/mlx5/driver.h | 7 +++- 4 files changed, 44 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5bdaf3d545b2..10d282841f5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work) mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } static void cmd_work_handler(struct work_struct *work) @@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work) if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + u8 status = 0; + u32 drv_synd; + + ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); + MLX5_SET(mbox_out, ent->out, status, status); + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); + + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + return; + } + /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work) poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); } } @@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) wait_for_completion(&ent->done); } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ent->ret = -ETIMEDOUT; - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } err = ent->ret; @@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) } } -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; @@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) struct semaphore *sem; ent = cmd->ent_arr[i]; + + /* if we already completed the command, ignore it */ + if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, + &ent->state)) { + /* only real completion can free the cmd slot */ + if (!forced) { + mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", + ent->idx); + free_ent(cmd, ent->idx); + } + continue; + } + if (ent->callback) cancel_delayed_work(&ent->cb_timeout_work); if 
(ent->page_queue) @@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", ent->ret, deliv_status_to_str(ent->status), ent->status); } - free_ent(cmd, ent->idx); + + /* only real completion will free the entry slot */ + if (!forced) + free_ent(cmd, ent->idx); if (ent->callback) { ds = ent->ts2 - ent->ts1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ea5d8d37a75c..33eae5ad2fb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) break; case MLX5_EVENT_TYPE_CMD: - mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); break; case MLX5_EVENT_TYPE_PORT_CHANGE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d0515391d33b..44f59b1d6f0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev) spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); - mlx5_cmd_comp_handler(dev, vector); + mlx5_cmd_comp_handler(dev, vector, true); return; no_trig: diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index bcdf739ee41a..93273d9ea4d1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -787,7 +787,12 @@ enum { typedef void (*mlx5_cmd_cbk_t)(int status, void *context); +enum { + MLX5_CMD_ENT_STATE_PENDING_COMP, +}; + struct mlx5_cmd_work_ent { + unsigned long state; struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; void *uout; @@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, -- cgit v1.2.3 From 7f65b1f5adc5f8496ca8bec4947de66fefe36220 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Mon, 22 May 2017 14:50:30 +0200 Subject: cdc-ether: divorce initialisation with a filter reset and a generic method Some devices need their multicast filter reset but others are crashed by that. So the methods need to be separated. Signed-off-by: Oliver Neukum Reported-by: "Ridgway, Keith" Signed-off-by: David S. 
Miller --- drivers/net/usb/cdc_ether.c | 31 ++++++++++++++++++++++++------- include/linux/usb/usbnet.h | 1 + 2 files changed, 25 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index f3ae88fdf332..8ab281b478f2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -310,6 +310,26 @@ skip: return -ENODEV; } + return 0; + +bad_desc: + dev_info(&dev->udev->dev, "bad CDC descriptors\n"); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); + + +/* like usbnet_generic_cdc_bind() but handles filter initialization + * correctly + */ +int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int rv; + + rv = usbnet_generic_cdc_bind(dev, intf); + if (rv < 0) + goto bail_out; + /* Some devices don't initialise properly. In particular * the packet filter is not reset. There are devices that * don't do reset all the way. So the packet filter should @@ -317,13 +337,10 @@ skip: */ usbnet_cdc_update_filter(dev); - return 0; - -bad_desc: - dev_info(&dev->udev->dev, "bad CDC descriptors\n"); - return -ENODEV; +bail_out: + return rv; } -EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); +EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind); void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) { @@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct cdc_state))); - status = usbnet_generic_cdc_bind(dev, intf); + status = usbnet_ether_cdc_bind(dev, intf); if (status < 0) return status; diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 7dffa5624ea6..97116379db5f 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -206,6 +206,7 @@ struct cdc_state { }; extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); +extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_status(struct usbnet *, struct urb *); -- cgit v1.2.3 From 12e8b570e732eaa5eae3a2895ba3fbcf91bde2b4 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 22 May 2017 20:13:07 +0200 Subject: mlx5: fix bug reading rss_hash_type from CQE Masks for extracting part of the Completion Queue Entry (CQE) field rss_hash_type were swapped, namely CQE_RSS_HTYPE_IP and CQE_RSS_HTYPE_L4. The bug resulted in setting skb->l4_hash, even though the rss_hash_type indicated that the hash was NOT computed over the L4 (UDP or TCP) part of the packet. Added comments from the datasheet, to make it more clear what these masks are selecting. Signed-off-by: Jesper Dangaard Brouer Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- include/linux/mlx5/device.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index dd9a263ed368..a940ec6a046c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -787,8 +787,14 @@ enum { }; enum { - CQE_RSS_HTYPE_IP = 0x3 << 6, - CQE_RSS_HTYPE_L4 = 0x3 << 2, + CQE_RSS_HTYPE_IP = 0x3 << 2, + /* cqe->rss_hash_type[3:2] - IP destination selected for hash + * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) + */ + CQE_RSS_HTYPE_L4 = 0x3 << 6, + /* cqe->rss_hash_type[7:6] - L4 destination selected for hash + * (00 = none, 01 = TCP.
10 = UDP, 11 = IPSEC.SPI + */ }; enum { -- cgit v1.2.3 From 4c0ebd6fed66388584abb27d7b0f188cc1ec01fe Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 23 May 2017 00:20:26 +0300 Subject: net: make struct inet_frags::qsize unsigned This field is the sizeof of the corresponding kmem_cache, so it can't be negative. Prepare for 32-bit kmem_cache_create(). Signed-off-by: Alexey Dobriyan Signed-off-by: David S. Miller --- include/net/inet_frag.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 5894730ec82a..975779d0e7b0 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -92,7 +92,7 @@ struct inet_frags { */ u32 rnd; seqlock_t rnd_seqlock; - int qsize; + unsigned int qsize; unsigned int (*hashfn)(const struct inet_frag_queue *); bool (*match)(const struct inet_frag_queue *q, -- cgit v1.2.3 From 417ccf6b5bc3f1a390505d5ef65ec17f10e8b29a Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 23 May 2017 00:21:39 +0300 Subject: net: make struct request_sock_ops::obj_size unsigned This field is the sizeof of the corresponding kmem_cache, so it can't be negative. Space will be saved after the 32-bit kmem_cache_create() patch. Signed-off-by: Alexey Dobriyan Signed-off-by: David S. Miller --- include/net/request_sock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/request_sock.h b/include/net/request_sock.h index a12a5d25b27e..53ced67c4ae9 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -29,7 +29,7 @@ struct proto; struct request_sock_ops { int family; - int obj_size; + unsigned int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *sk, -- cgit v1.2.3 From 6f4dbd149d2a151b89d1a5bbf7530ee5546c7908 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 19 May 2017 11:33:16 +0200 Subject: libceph: use kbasename() and kill ceph_file_part() Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder --- include/linux/ceph/ceph_debug.h | 6 +++--- net/ceph/ceph_common.c | 13 ------------- 2 files changed, 3 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index aa2e19182d99..51c5bd64bd00 100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h @@ -3,6 +3,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include + #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG /* @@ -12,12 +14,10 @@ */ # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) -extern const char *ceph_file_part(const char *s, int len); # define dout(fmt, ...) \ pr_debug("%.*s %12.12s:%-4d : " fmt, \ 8 - (int)sizeof(KBUILD_MODNAME), " ", \ - ceph_file_part(__FILE__, sizeof(__FILE__)), \ - __LINE__, ##__VA_ARGS__) + kbasename(__FILE__), __LINE__, ##__VA_ARGS__) # else /* faux printk call just to see any compiler warnings. */ # define dout(fmt, ...)
do { \ diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 4fd02831beed..47e94b560ba0 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = { module_param_cb(supported_features, &param_ops_supported_features, NULL, S_IRUGO); -/* - * find filename portion of a path (/foo/bar/baz -> baz) - */ -const char *ceph_file_part(const char *s, int len) -{ - const char *e = s + len; - - while (e != s && *(e-1) != '/') - e--; - return e; -} -EXPORT_SYMBOL(ceph_file_part); - const char *ceph_msg_type_name(int type) { switch (type) { -- cgit v1.2.3 From 4d071c3238987325b9e50e33051a40d1cce311cc Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 23 May 2017 14:18:17 -0500 Subject: PCI/PM: Add needs_resume flag to avoid suspend complete optimization Some drivers - like i915 - may not support the system suspend direct complete optimization due to differences in their runtime and system suspend sequence. Add a flag that, when set, resumes the device before calling the driver's system suspend handlers, which effectively disables the optimization. Needed by a future patch fixing suspend/resume on i915. Suggested by Rafael. Signed-off-by: Imre Deak Signed-off-by: Bjorn Helgaas Acked-by: Rafael J. Wysocki Cc: stable@vger.kernel.org --- drivers/pci/pci.c | 3 ++- include/linux/pci.h | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b01bd5bba8e6..563901cd9c06 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) if (!pm_runtime_suspended(dev) || pci_target_state(pci_dev) != pci_dev->current_state - || platform_pci_need_resume(pci_dev)) + || platform_pci_need_resume(pci_dev) + || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) return false; /* diff --git a/include/linux/pci.h b/include/linux/pci.h index 33c2b0b77429..df7dd9021646 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -183,6 +183,11 @@ enum pci_dev_flags { PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), /* Do not use FLR even if device advertises PCI_AF_CAP */ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), + /* + * Resume before calling the driver's system suspend hooks, disabling + * the direct_complete optimization. + */ + PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11), }; enum pci_irq_reroute_variant { -- cgit v1.2.3 From 41c25707d21716826e3c1f60967f5550610ec1c9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 May 2017 12:03:48 -0400 Subject: cpuset: consider dying css as offline In most cases, a cgroup controller doesn't care about the lifetimes of cgroups. For the controller, a css becomes online when ->css_online() is called on it and offline when ->css_offline() is called. However, cpuset is special in that the user interface it exposes cares whether certain cgroups exist or not. Combined with the RCU delay between cgroup removal and css offlining, this can lead to user visible behavior oddities where operations which should succeed after cgroup removals fail for some time period. The effects of cgroup removals are delayed when seen from userland. This patch adds css_is_dying() which tests whether offline is pending and updates is_cpuset_online() so that the function returns false also while offline is pending. This gets rid of the userland visible delays.
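The predicate change boils down to also treating a css whose offline is merely scheduled as offline (a minimal standalone sketch, not the kernel code; the two booleans stand in for the CS_ONLINE bit and for percpu_ref_is_dying() on the css refcount):

	#include <stdbool.h>

	/* Toy model of a css lifecycle: online -> dying (offline
	 * scheduled but RCU delayed) -> offline. */
	struct css_state {
		bool online;	/* stands in for the CS_ONLINE bit */
		bool dying;	/* stands in for percpu_ref_is_dying() */
	};

	/* Old check: a css whose offline is still RCU delayed looks
	 * alive, so userland keeps seeing the removed cpuset. */
	bool is_online_old(const struct css_state *css)
	{
		return css->online;
	}

	/* New check: a dying css is reported offline immediately,
	 * which is what is_cpuset_online() now does via css_is_dying(). */
	bool is_online_new(const struct css_state *css)
	{
		return css->online && !css->dying;
	}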
Signed-off-by: Tejun Heo Reported-by: Daniel Jordan Link: http://lkml.kernel.org/r/327ca1f5-7957-fbb9-9e5f-9ba149d40ba2@oracle.com Cc: stable@vger.kernel.org Signed-off-by: Tejun Heo --- include/linux/cgroup.h | 20 ++++++++++++++++++++ kernel/cgroup/cpuset.c | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed2573e149fa..710a005c6b7a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) return true; } +/** + * css_is_dying - test whether the specified css is dying + * @css: target css + * + * Test whether @css is in the process of offlining or already offline. In + * most cases, ->css_online() and ->css_offline() callbacks should be + * enough; however, the actual offline operations are RCU delayed and this + * test returns %true also when @css is scheduled to be offlined. + * + * This is useful, for example, when the use case requires synchronous + * behavior with respect to cgroup removal. cgroup removal schedules css + * offlining but the css can seem alive while the operation is being + * delayed. If the delay affects user visible semantics, this test can be + * used to resolve the situation. + */ +static inline bool css_is_dying(struct cgroup_subsys_state *css) +{ + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); +} + /** * css_put - put a css reference * @css: target css diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index f6501f4f6040..ae643412948a 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -176,9 +176,9 @@ typedef enum { } cpuset_flagbits_t; /* convenient tests for these bits */ -static inline bool is_cpuset_online(const struct cpuset *cs) +static inline bool is_cpuset_online(struct cpuset *cs) { - return test_bit(CS_ONLINE, &cs->flags); + return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); } static inline int is_cpu_exclusive(const struct cpuset *cs) -- cgit v1.2.3 From 9d7650c25498e4f51213fe48eddde5778434f375 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Tue, 23 May 2017 09:41:19 +0300 Subject: qed: Align DP_ERR style with other DP macros Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- include/linux/qed/qed_if.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index c70ac13a97e6..ff590cb37a00 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -700,11 +700,13 @@ struct qed_common_ops { (((value) >> (name ## _SHIFT)) & name ## _MASK) /* Debug print definitions */ -#define DP_ERR(cdev, fmt, ...) \ - pr_err("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - DP_NAME(cdev) ? DP_NAME(cdev) : "", \ - ## __VA_ARGS__) \ +#define DP_ERR(cdev, fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } while (0) #define DP_NOTICE(cdev, fmt, ...) \ do { \ -- cgit v1.2.3 From ae33666ab89675968d77753d18452b1ef654c43a Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Tue, 23 May 2017 09:41:26 +0300 Subject: qed: Provide MBI information in dev_info Pass additional information about the package installed on persistent memory so that protocol drivers would be able to log it. Signed-off-by: Tomer Tayar Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 6 ++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 3 +++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 30 ++++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 12 ++++++++++++ include/linux/qed/qed_if.h | 17 +++++++++++++++++ 5 files changed, 68 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 4755d0b33b90..802c162d8474 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11782,6 +11782,12 @@ struct nvm_cfg1_glob { u32 led_global_settings; u32 generic_cont1; u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 u32 mbi_date; u32 misc_sig; u32 device_capabilities; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 3043dcce125c..b5313c561fa2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -281,6 +281,9 @@ int qed_fill_dev_info(struct qed_dev *cdev, qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, &dev_info->mfw_rev, NULL); + qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mbi_version); + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, &dev_info->flash_size); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b32e8190f3fb..fc49c75e6c4b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1523,6 +1523,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + /* Read the address of the nvm_cfg */ + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read the offset of nvm_cfg1 */ + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, mbi_version); + *p_mbi_ver = qed_rd(p_hwfn, p_ptt, + mbi_ver_addr) & + (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | + NVM_CFG1_GLOB_MBI_VERSION_1_MASK | + NVM_CFG1_GLOB_MBI_VERSION_2_MASK); + + return 0; +} + int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) { struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 3e5bffe3d4e2..40247593e772 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -255,6 +255,18 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); +/** + * @brief Get the MBI version value + * + * @param p_hwfn + * @param p_ptt + * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * + * @return int - 0 - operation was successful. 
+ */ +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver); + /** * @brief Get media type value of the port. * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index ff590cb37a00..b00e6753b4f4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -328,6 +328,14 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; +#define QED_MFW_VERSION_0_MASK 0x000000FF +#define QED_MFW_VERSION_0_OFFSET 0 +#define QED_MFW_VERSION_1_MASK 0x0000FF00 +#define QED_MFW_VERSION_1_OFFSET 8 +#define QED_MFW_VERSION_2_MASK 0x00FF0000 +#define QED_MFW_VERSION_2_OFFSET 16 +#define QED_MFW_VERSION_3_MASK 0xFF000000 +#define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; u8 mf_mode; @@ -337,6 +345,15 @@ struct qed_dev_info { bool wol_support; + /* MBI version */ + u32 mbi_version; +#define QED_MBI_VERSION_0_MASK 0x000000FF +#define QED_MBI_VERSION_0_OFFSET 0 +#define QED_MBI_VERSION_1_MASK 0x0000FF00 +#define QED_MBI_VERSION_1_OFFSET 8 +#define QED_MBI_VERSION_2_MASK 0x00FF0000 +#define QED_MBI_VERSION_2_OFFSET 16 + enum qed_dev_type dev_type; /* Output parameters for qede */ -- cgit v1.2.3 From 712c3cbf193fcadf0ba67da61432beb1a71e400b Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Tue, 23 May 2017 09:41:28 +0300 Subject: qed: Replace set_id() api with set_name() Current API between qed and protocol modules allows passing an additional private string - but it doesn't get utilized by qed anywhere. Clarify the API by removing it and renaming it 'set_name'. CC: Manish Rangankar Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 1 - drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +++------ drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++-- drivers/scsi/qedf/qedf_main.c | 2 +- drivers/scsi/qedi/qedi_main.c | 2 +- include/linux/qed/qed_if.h | 4 +--- 6 files changed, 8 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 2eb6031f0df1..e0becec17b09 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -638,7 +638,6 @@ struct qed_dev { int pcie_width; int pcie_speed; - u8 ver_str[VER_SIZE]; /* Add MF related configuration */ u8 mcp_rev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b5313c561fa2..c5bb80b9afc1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -338,6 +338,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, if (!cdev) goto err0; + cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; cdev->protocol = params->protocol; if (params->is_vf) @@ -1128,17 +1129,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev) return 0; } -static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE], - char ver_str[VER_SIZE]) +static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) { int i; memcpy(cdev->name, name, NAME_SIZE); for_each_hwfn(cdev, i) snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); - - memcpy(cdev->ver_str, ver_str, VER_SIZE); - cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; } static u32 qed_sb_init(struct qed_dev *cdev, @@ -1692,7 +1689,7 @@ const struct qed_common_ops qed_common_ops_pass = { .probe = &qed_probe, .remove = &qed_remove, .set_power_state = &qed_set_power_state, - .set_id = &qed_set_id, + .set_name = &qed_set_name, .update_pf_params = &qed_update_pf_params, .slowpath_start = 
&qed_slowpath_start, .slowpath_stop = &qed_slowpath_stop, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d496ba70ddb8..00c70625f8a4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -259,7 +259,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event, /* Notify qed of the name change */ if (!edev->ops || !edev->ops->common) goto done; - edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede"); + edev->ops->common->set_name(edev->cdev, edev->ndev->name); break; case NETDEV_CHANGEADDR: edev = netdev_priv(ndev); @@ -967,7 +967,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, goto err4; } - edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); + edev->ops->common->set_name(cdev, edev->ndev->name); /* PTP not supported on VFs */ if (!is_vf) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a5c97342fd5d..b97405ed6cae 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2954,7 +2954,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); sprintf(host_buf, "host_%d", host->host_no); - qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION); + qed_ops->common->set_name(qedf->cdev, host_buf); /* Set xid max values */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 92775a8b74b1..073b3051bb8f 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1843,7 +1843,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) qedi->mac); sprintf(host_buf, "host_%d", qedi->shost->host_no); - qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION); + qedi_ops->common->set_name(qedi->cdev, host_buf); qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b00e6753b4f4..73c46d6d5727 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -520,9 +520,7 @@ struct qed_common_ops { int (*set_power_state)(struct qed_dev *cdev, pci_power_t state); - void (*set_id)(struct qed_dev *cdev, - char name[], - char ver_str[]); + void (*set_name) (struct qed_dev *cdev, char name[]); /* Client drivers need to make this call before slowpath_start. * PF params required for the call before slowpath_start is -- cgit v1.2.3 From ac4bb5de27010e41f027c635dedca1393e7ebf55 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 23 May 2017 18:40:44 +0200 Subject: net: flow_dissector: add support for dissection of tcp flags Add support for dissection of tcp flags. Uses similar function call to tcp dissection function as arp, mpls and others. Signed-off-by: Jiri Pirko Acked-by: Or Gerlitz Signed-off-by: David S. 
Miller --- include/net/flow_dissector.h | 9 +++++++++ net/core/flow_dissector.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'include') diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8d21d448daa9..efe34eec61dc 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -157,6 +157,14 @@ struct flow_dissector_key_eth_addrs { unsigned char src[ETH_ALEN]; }; +/** + * struct flow_dissector_key_tcp: + * @flags: flags + */ +struct flow_dissector_key_tcp { + __be16 flags; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -177,6 +185,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ + FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 28d94bce4df8..5a45943081f5 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -342,6 +343,30 @@ __skb_flow_dissect_gre(const struct sk_buff *skb, return FLOW_DISSECT_RET_OUT_PROTO_AGAIN; } +static void +__skb_flow_dissect_tcp(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, void *data, int thoff, int hlen) +{ + struct flow_dissector_key_tcp *key_tcp; + struct tcphdr *th, _th; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP)) + return; + + th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th); + if (!th) + return; + + if (unlikely(__tcp_hdrlen(th) < sizeof(_th))) + return; + + key_tcp = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_TCP, + target_container); + key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF)); +} + /** * __skb_flow_dissect - extract the flow_keys struct and return it * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified @@ -683,6 +708,10 @@ ip_proto_again: case IPPROTO_MPLS: proto = htons(ETH_P_MPLS_UC); goto mpls; + case IPPROTO_TCP: + __skb_flow_dissect_tcp(skb, flow_dissector, target_container, + data, nhoff, hlen); + break; default: break; } -- cgit v1.2.3 From fdfc7dd6ca39b117c709dceee8d32ac4447294d6 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 23 May 2017 18:40:45 +0200 Subject: net/sched: flower: add support for matching on tcp flags Benefit from the support of tcp flags dissection and allow user to insert rules matching on tcp flags. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_cls.h | 3 +++ net/sched/cls_flower.c | 13 ++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 1b9aa9e6b4fd..c6e8cf5e9c40 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -451,6 +451,9 @@ enum { TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */ TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */ + TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */ + TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index ca526c0881bd..fb74a47830f4 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -49,6 +49,7 @@ struct fl_flow_key { }; struct flow_dissector_key_ports enc_tp; struct flow_dissector_key_mpls mpls; + struct flow_dissector_key_tcp tcp; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ struct fl_flow_mask_range { @@ -424,6 +425,8 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -596,6 +599,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, sizeof(key->tp.dst)); + fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, + &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, + sizeof(key->tcp.flags)); } else if (key->basic.ip_proto == IPPROTO_UDP) { fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, @@ -766,6 +772,8 @@ static void fl_init_dissector(struct cls_fl_head *head, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_TCP, tcp); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ICMP, icmp); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, @@ -1215,7 +1223,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, sizeof(key->tp.src)) || fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, - sizeof(key->tp.dst)))) + sizeof(key->tp.dst)) || + fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, + &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, + sizeof(key->tcp.flags)))) goto nla_put_failure; else if (key->basic.ip_proto == IPPROTO_UDP && (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, -- cgit v1.2.3 From 35d2f80b07bbe03fb358afb0bdeff7437a7d67ff Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Tue, 23 May 2017 13:38:41 -0400 Subject: vlan: Fix tcp checksum offloads in Q-in-Q vlans It appears that TCP checksum offloading has been broken for Q-in-Q vlans. The behavior was exacerbated by the series commit afb0bc972b52 ("Merge branch 'stacked_vlan_tso'"), which enabled acceleration features on stacked vlans. However, even without that series, it is possible to trigger this issue. It just requires a lot more specialized configuration.
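To make the header-length problem concrete before getting into the root cause, here is a minimal user-space sketch of the offsets involved. This is purely illustrative: the constants mirror ETH_HLEN and VLAN_HLEN from the kernel headers, and ip_header_offset() is a hypothetical helper, not a kernel function.

#include <stdio.h>

#define ETH_HLEN   14	/* dst MAC + src MAC + ethertype */
#define VLAN_HLEN   4	/* one 802.1Q/802.1ad tag */

/* Hypothetical helper: offset of the IP header for n stacked VLAN tags. */
static int ip_header_offset(int vlan_tags)
{
	return ETH_HLEN + vlan_tags * VLAN_HLEN;
}

int main(void)
{
	/*
	 * Hardware advertising NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM may assume
	 * the IP header sits at a fixed, small offset; with two stacked tags
	 * the real offset is larger, so the device can end up checksumming
	 * the wrong bytes. Prints: untagged: 14, single tag: 18, Q-in-Q: 22.
	 */
	printf("untagged: %d, single tag: %d, Q-in-Q: %d\n",
	       ip_header_offset(0), ip_header_offset(1), ip_header_offset(2));
	return 0;
}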
The root cause is the interaction between how netdev_intersect_features() works, the features actually set on the vlan devices, and HW having the ability to checksum packets with longer headers. The issue starts when netdev_intersect_features() replaces NETIF_F_HW_CSUM with a combination of NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM, if the HW advertises IP|IPV6 specific checksums. This happens for tagged and multi-tagged packets. However, HW that enables IP|IPV6 checksum offloading doesn't guarantee that packets with arbitrarily long headers can be checksummed. This patch disables IP|IPV6 checksums for multi-tagged packets. CC: Toshiaki Makita CC: Michal Kubecek Signed-off-by: Vladislav Yasevich Acked-by: Toshiaki Makita Signed-off-by: David S. Miller --- include/linux/if_vlan.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 8d5fcd6284ce..283dc2f5364d 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, netdev_features_t features) { - if (skb_vlan_tagged_multi(skb)) - features = netdev_intersect_features(features, - NETIF_F_SG | - NETIF_F_HIGHDMA | - NETIF_F_FRAGLIST | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); + if (skb_vlan_tagged_multi(skb)) { + /* In the case of multi-tagged packets, use a direct mask + * instead of using netdev_intersect_features(), to make + * sure that only devices supporting NETIF_F_HW_CSUM will + * have checksum offloading support. + */ + features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + } return features; } -- cgit v1.2.3 From 367a8ce896f14018cc2c6cf2681aa440fff274f4 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 23 May 2017 09:42:37 -0700 Subject: net_sched: only create filter chains for new filters/actions tcf_chain_get() always creates a new filter chain if not found in existing ones. This is totally unnecessary when we get or delete filters; a new chain should be created only for new filters (or new actions). Fixes: 5bc1701881e3 ("net: sched: introduce multichain support for filters") Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Signed-off-by: David S.
Miller --- include/net/pkt_cls.h | 3 ++- net/sched/act_api.c | 2 +- net/sched/cls_api.c | 13 +++++++++---- 3 files changed, 12 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 2c213a69c196..f7762295b7b8 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -18,7 +18,8 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #ifdef CONFIG_NET_CLS -struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index); +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, + bool create); void tcf_chain_put(struct tcf_chain *chain); int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 0ecf2a858767..aed6cf2e9fd8 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -34,7 +34,7 @@ static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp) if (!tp) return -EINVAL; - a->goto_chain = tcf_chain_get(tp->chain->block, chain_index); + a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true); if (!a->goto_chain) return -ENOMEM; return 0; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 89fbb35bc666..39da0c5801c9 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -220,7 +220,8 @@ static void tcf_chain_destroy(struct tcf_chain *chain) kfree(chain); } -struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index) +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, + bool create) { struct tcf_chain *chain; @@ -230,7 +231,10 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index) return chain; } } - return tcf_chain_create(block, chain_index); + if (create) + return tcf_chain_create(block, chain_index); + else + return NULL; } EXPORT_SYMBOL(tcf_chain_get); @@ -511,9 +515,10 @@ replay: err = -EINVAL; goto errout; } - chain = tcf_chain_get(block, chain_index); + chain = tcf_chain_get(block, chain_index, + n->nlmsg_type == RTM_NEWTFILTER); if (!chain) { - err = -ENOMEM; + err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL; goto errout; } -- cgit v1.2.3 From 614d0d77b49a9b131e58b77473698ab5b2c525b7 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 25 May 2017 01:05:09 +0200 Subject: bpf: add various verifier test cases This patch adds various verifier test cases: 1) A test case for the pruning issue when tracking alignment is used. 2) Various PTR_TO_MAP_VALUE_OR_NULL tests to make sure pointer arithmetic turns such register into UNKNOWN_VALUE type. 3) Test cases for the special treatment of LD_ABS/LD_IND to make sure verifier doesn't break calling convention here. Latter is needed, since f.e. arm64 JIT uses r1 - r5 for storing temporary data, so they really must be marked as NOT_INIT. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- include/linux/filter.h | 10 ++ tools/include/linux/filter.h | 10 ++ tools/testing/selftests/bpf/test_verifier.c | 239 +++++++++++++++++++++++++++- 3 files changed, 255 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index 56197f82af45..62d948f80730 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -272,6 +272,16 @@ struct bpf_prog_aux; .off = OFF, \ .imm = IMM }) +/* Unconditional jumps, goto pc + off16 */ + +#define BPF_JMP_A(OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = 0 }) + /* Function call */ #define BPF_EMIT_CALL(FUNC) \ diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h index 390d7c9685fd..4ce25d43e8e3 100644 --- a/tools/include/linux/filter.h +++ b/tools/include/linux/filter.h @@ -208,6 +208,16 @@ .off = OFF, \ .imm = IMM }) +/* Unconditional jumps, goto pc + off16 */ + +#define BPF_JMP_A(OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = 0 }) + /* Function call */ #define BPF_EMIT_CALL(FUNC) \ diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3773562056da..cabb19b1e371 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -49,6 +49,7 @@ #define MAX_NR_MAPS 4 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) +#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) struct bpf_test { const char *descr; @@ -2614,6 +2615,30 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "direct packet access: test17 (pruning, alignment)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), + BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_A(-6), + }, + .errstr = "misaligned packet access off 2+15+-4 size 4", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, + }, { "helper access to packet: test1, valid packet_ptr range", .insns = { @@ -3340,6 +3365,70 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, + { + "alu ops on ptr_to_map_value_or_null, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "R4 invalid mem access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS + }, + { + "alu ops on ptr_to_map_value_or_null, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "R4 invalid mem access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS + }, + { + "alu ops on ptr_to_map_value_or_null, 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "R4 invalid mem access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS + }, { "invalid memory access with multiple map_lookup_elem calls", .insns = { @@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = { .fixup_map_in_map = { 3 }, .errstr = "R1 type=map_value_or_null expected=map_ptr", .result = REJECT, - } + }, + { + "ld_abs: check calling conv, r1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r3", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .errstr = "R3 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_EXIT_INSN(), + }, + .errstr = "R4 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r5", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .errstr = "R5 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r7", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_7, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "ld_ind: check calling conv, r1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling 
conv, r3", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .errstr = "R3 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_EXIT_INSN(), + }, + .errstr = "R4 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r5", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_5, 1), + BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .errstr = "R5 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r7", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) @@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv, do_test_fixup(test, prog, map_fds); - fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, - prog, prog_len, "GPL", 0, bpf_vlog, - sizeof(bpf_vlog)); + fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, + prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT, + "GPL", 0, bpf_vlog, sizeof(bpf_vlog)); expected_ret = unpriv && test->result_unpriv != UNDEF ? test->result_unpriv : test->result; -- cgit v1.2.3 From 83b4605b0c16cde5b00c8cf192408d51eab75402 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 May 2017 18:59:54 +0200 Subject: PCI/msi: fix the pci_alloc_irq_vectors_affinity stub We need to return an error for any call that asks for MSI / MSI-X vectors only, so that non-trivial fallback logic can work properly. Also valid dev->irq and use the "correct" errno value based on feedback from Linus. Signed-off-by: Christoph Hellwig Reported-by: Steven Rostedt Fixes: aff17164 ("PCI: Provide sensible IRQ vector alloc/free routines") Signed-off-by: Linus Torvalds --- include/linux/pci.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/pci.h b/include/linux/pci.h index 33c2b0b77429..fc2e832d7b9c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1342,9 +1342,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, const struct irq_affinity *aff_desc) { - if (min_vecs > 1) - return -EINVAL; - return 1; + if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) + return 1; + return -ENOSPC; } static inline void pci_free_irq_vectors(struct pci_dev *dev) -- cgit v1.2.3 From 3abd1ade6765e8edcccad6a9e1039cc709e65dde Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 25 May 2017 10:42:33 -0700 Subject: net: ipv4: refactor __ip_route_output_key_hash A later patch wants access to the fib result on an output route lookup with the rcu lock held. Refactor __ip_route_output_key_hash, pushing the logic between rcu_read_lock ... rcu_read_unlock into a new helper with the fib_result as an input arg. To keep the name length under control remove the leading underscores from the name and add _rcu to the name of the new helper indicating it is called with the rcu read lock held. 
Signed-off-by: David Ahern Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/net/route.h | 9 ++++++--- net/ipv4/icmp.c | 2 +- net/ipv4/route.c | 53 +++++++++++++++++++++++++++++++---------------------- 3 files changed, 38 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/net/route.h b/include/net/route.h index 2cc0e14c6359..89e4028cd063 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -113,13 +113,16 @@ struct in_device; int ip_rt_init(void); void rt_cache_flush(struct net *net); void rt_flush_dev(struct net_device *dev); -struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp, - const struct sk_buff *skb); +struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp, + const struct sk_buff *skb); +struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp, + struct fib_result *res, + const struct sk_buff *skb); static inline struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp) { - return __ip_route_output_key_hash(net, flp, NULL); + return ip_route_output_key_hash(net, flp, NULL); } struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 43318b5f5647..5610971bf859 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -489,7 +489,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); - rt = __ip_route_output_key_hash(net, fl4, skb_in); + rt = ip_route_output_key_hash(net, fl4, skb_in); if (IS_ERR(rt)) return rt; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 655d9eebe43e..c9b55cb0e316 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2246,29 +2246,40 @@ add: * Major route resolver routine. */ -struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, - const struct sk_buff *skb) +struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, + const struct sk_buff *skb) { - struct net_device *dev_out = NULL; __u8 tos = RT_FL_TOS(fl4); - unsigned int flags = 0; struct fib_result res; struct rtable *rth; - int orig_oif; - int err = -ENETUNREACH; res.tclassid = 0; res.fi = NULL; res.table = NULL; - orig_oif = fl4->flowi4_oif; - fl4->flowi4_iif = LOOPBACK_IFINDEX; fl4->flowi4_tos = tos & IPTOS_RT_MASK; fl4->flowi4_scope = ((tos & RTO_ONLINK) ? 
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); rcu_read_lock(); + rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb); + rcu_read_unlock(); + + return rth; +} +EXPORT_SYMBOL_GPL(ip_route_output_key_hash); + +struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, + struct fib_result *res, + const struct sk_buff *skb) +{ + struct net_device *dev_out = NULL; + int orig_oif = fl4->flowi4_oif; + unsigned int flags = 0; + struct rtable *rth; + int err = -ENETUNREACH; + if (fl4->saddr) { rth = ERR_PTR(-EINVAL); if (ipv4_is_multicast(fl4->saddr) || @@ -2354,15 +2365,15 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK); dev_out = net->loopback_dev; fl4->flowi4_oif = LOOPBACK_IFINDEX; - res.type = RTN_LOCAL; + res->type = RTN_LOCAL; flags |= RTCF_LOCAL; goto make_route; } - err = fib_lookup(net, fl4, &res, 0); + err = fib_lookup(net, fl4, res, 0); if (err) { - res.fi = NULL; - res.table = NULL; + res->fi = NULL; + res->table = NULL; if (fl4->flowi4_oif && (ipv4_is_multicast(fl4->daddr) || !netif_index_is_l3_master(net, fl4->flowi4_oif))) { @@ -2387,43 +2398,41 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, if (fl4->saddr == 0) fl4->saddr = inet_select_addr(dev_out, 0, RT_SCOPE_LINK); - res.type = RTN_UNICAST; + res->type = RTN_UNICAST; goto make_route; } rth = ERR_PTR(err); goto out; } - if (res.type == RTN_LOCAL) { + if (res->type == RTN_LOCAL) { if (!fl4->saddr) { - if (res.fi->fib_prefsrc) - fl4->saddr = res.fi->fib_prefsrc; + if (res->fi->fib_prefsrc) + fl4->saddr = res->fi->fib_prefsrc; else fl4->saddr = fl4->daddr; } /* L3 master device is the loopback for that domain */ - dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? : + dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? : net->loopback_dev; fl4->flowi4_oif = dev_out->ifindex; flags |= RTCF_LOCAL; goto make_route; } - fib_select_path(net, &res, fl4, skb); + fib_select_path(net, res, fl4, skb); - dev_out = FIB_RES_DEV(res); + dev_out = FIB_RES_DEV(*res); fl4->flowi4_oif = dev_out->ifindex; make_route: - rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags); + rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); out: - rcu_read_unlock(); return rth; } -EXPORT_SYMBOL_GPL(__ip_route_output_key_hash); static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) { -- cgit v1.2.3 From 5510cdf7be042a1943222e19912f13a396c0b914 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 25 May 2017 10:42:34 -0700 Subject: net: ipv4: refactor ip_route_input_noref A later patch wants access to the fib result on an input route lookup with the rcu lock held. Refactor ip_route_input_noref pushing the logic between rcu_read_lock ... rcu_read_unlock into a new helper that takes the fib_result as an input arg. Signed-off-by: David Ahern Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- include/net/route.h | 3 +++ net/ipv4/route.c | 66 ++++++++++++++++++++++++++++++----------------------- 2 files changed, 40 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/net/route.h b/include/net/route.h index 89e4028cd063..08e689f23365 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -178,6 +178,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin); +int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, + u8 tos, struct net_device *devin, + struct fib_result *res); static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c9b55cb0e316..1dc8fd1e60a9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1852,9 +1852,9 @@ static int ip_mkroute_input(struct sk_buff *skb, */ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev) + u8 tos, struct net_device *dev, + struct fib_result *res) { - struct fib_result res; struct in_device *in_dev = __in_dev_get_rcu(dev); struct ip_tunnel_info *tun_info; struct flowi4 fl4; @@ -1884,8 +1884,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) goto martian_source; - res.fi = NULL; - res.table = NULL; + res->fi = NULL; + res->table = NULL; if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0)) goto brd_input; @@ -1921,17 +1921,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.daddr = daddr; fl4.saddr = saddr; fl4.flowi4_uid = sock_net_uid(net, NULL); - err = fib_lookup(net, &fl4, &res, 0); + err = fib_lookup(net, &fl4, res, 0); if (err != 0) { if (!IN_DEV_FORWARD(in_dev)) err = -EHOSTUNREACH; goto no_route; } - if (res.type == RTN_BROADCAST) + if (res->type == RTN_BROADCAST) goto brd_input; - if (res.type == RTN_LOCAL) { + if (res->type == RTN_LOCAL) { err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &itag); if (err < 0) @@ -1943,10 +1943,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, err = -EHOSTUNREACH; goto no_route; } - if (res.type != RTN_UNICAST) + if (res->type != RTN_UNICAST) goto martian_destination; - err = ip_mkroute_input(skb, &res, in_dev, daddr, saddr, tos); + err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos); out: return err; brd_input: @@ -1960,14 +1960,14 @@ brd_input: goto martian_source; } flags |= RTCF_BROADCAST; - res.type = RTN_BROADCAST; + res->type = RTN_BROADCAST; RT_CACHE_STAT_INC(in_brd); local_input: do_cache = false; - if (res.fi) { + if (res->fi) { if (!itag) { - rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input); + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); err = 0; @@ -1978,7 +1978,7 @@ local_input: } rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? 
: net->loopback_dev, - flags | RTCF_LOCAL, res.type, + flags | RTCF_LOCAL, res->type, IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache); if (!rth) goto e_nobufs; @@ -1988,18 +1988,18 @@ local_input: rth->dst.tclassid = itag; #endif rth->rt_is_input = 1; - if (res.table) - rth->rt_table_id = res.table->tb_id; + if (res->table) + rth->rt_table_id = res->table->tb_id; RT_CACHE_STAT_INC(in_slow_tot); - if (res.type == RTN_UNREACHABLE) { + if (res->type == RTN_UNREACHABLE) { rth->dst.input= ip_error; rth->dst.error= -err; rth->rt_flags &= ~RTCF_LOCAL; } if (do_cache) { - struct fib_nh *nh = &FIB_RES_NH(res); + struct fib_nh *nh = &FIB_RES_NH(*res); rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate); if (lwtunnel_input_redirect(rth->dst.lwtstate)) { @@ -2019,9 +2019,9 @@ local_input: no_route: RT_CACHE_STAT_INC(in_no_route); - res.type = RTN_UNREACHABLE; - res.fi = NULL; - res.table = NULL; + res->type = RTN_UNREACHABLE; + res->fi = NULL; + res->table = NULL; goto local_input; /* @@ -2051,11 +2051,22 @@ martian_source: int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev) { - int res; + struct fib_result res; + int err; tos &= IPTOS_RT_MASK; rcu_read_lock(); + err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res); + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL(ip_route_input_noref); + +/* called with rcu_read_lock held */ +int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, + u8 tos, struct net_device *dev, struct fib_result *res) +{ /* Multicast recognition logic is moved from route cache to here. The problem was that too many Ethernet cards have broken/missing hardware multicast filters :-( As result the host on multicasting @@ -2070,6 +2081,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (ipv4_is_multicast(daddr)) { struct in_device *in_dev = __in_dev_get_rcu(dev); int our = 0; + int err = -EINVAL; if (in_dev) our = ip_check_mc_rcu(in_dev, daddr, saddr, @@ -2085,7 +2097,6 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, ip_hdr(skb)->protocol); } - res = -EINVAL; if (our #ifdef CONFIG_IP_MROUTE || @@ -2093,17 +2104,14 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, IN_DEV_MFORWARD(in_dev)) #endif ) { - res = ip_route_input_mc(skb, daddr, saddr, + err = ip_route_input_mc(skb, daddr, saddr, tos, dev, our); } - rcu_read_unlock(); - return res; + return err; } - res = ip_route_input_slow(skb, daddr, saddr, tos, dev); - rcu_read_unlock(); - return res; + + return ip_route_input_slow(skb, daddr, saddr, tos, dev, res); } -EXPORT_SYMBOL(ip_route_input_noref); /* called with rcu_read_lock() */ static struct rtable *__mkroute_output(const struct fib_result *res, -- cgit v1.2.3 From 6ffd903415320d68a528865296e4740da350785e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 25 May 2017 10:42:37 -0700 Subject: net: ipv4: Save trie prefix to fib lookup result Prefix is needed for returning matching route spec on get route request. Signed-off-by: David Ahern Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 1 + net/ipv4/fib_trie.c | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 42e8b8f55f7c..25f5c516afd1 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -136,6 +136,7 @@ struct fib_rule; struct fib_table; struct fib_result { + __be32 prefix; unsigned char prefixlen; unsigned char nh_sel; unsigned char type; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 6d0f6c79d9aa..6e9df7d9bcc2 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1452,6 +1452,7 @@ found: if (!(fib_flags & FIB_LOOKUP_NOREF)) atomic_inc(&fi->fib_clntref); + res->prefix = htonl(n->key); res->prefixlen = KEYLENGTH - fa->fa_slen; res->nh_sel = nhsel; res->type = fa->fa_type; -- cgit v1.2.3 From 0be1b305d9b808e5b28e74f4ef807851c14c39f2 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Thu, 25 May 2017 10:42:38 -0700 Subject: net: ipv4: add new RTM_F_FIB_MATCH flag for use with RTM_GETROUTE This flag, when specified, will return the matched fib result in response to an RTM_GETROUTE query. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/uapi/linux/rtnetlink.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 6487b21b2c1e..564790e854f7 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -278,6 +278,7 @@ enum rt_scope_t { #define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */ #define RTM_F_PREFIX 0x800 /* Prefix addresses */ #define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */ +#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */ /* Reserved table identifiers */ -- cgit v1.2.3 From 3fb07daff8e99243366a081e5129560734de4ada Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 25 May 2017 14:27:35 -0700 Subject: ipv4: add reference counting to metrics Andrey Konovalov reported crashes in ipv4_mtu(). I could reproduce the issue with KASAN kernels, between 10.246.7.151 and 10.246.7.152: 1) 20 concurrent netperf -t TCP_RR -H 10.246.7.152 -l 1000 & 2) At the same time, run the following loop: while : do ip ro add 10.246.7.152 dev eth0 src 10.246.7.151 mtu 1500 ip ro del 10.246.7.152 dev eth0 src 10.246.7.151 mtu 1500 done Cong Wang attempted to add back rt->fi in commit 82486aa6f1b9 ("ipv4: restore rt->fi for reference counting") but this proved to add some issues that were complex to solve. Instead, I suggested adding a refcount to the metrics themselves, as they are a standalone object (in particular, with no references to other objects). I tried to make this patch as small as possible to ease its backport, instead of being super clean. Note that we believe that only the ipv4 dst needs to take care of the metric refcount. But if this is wrong, this patch adds the basic infrastructure to extend this to other families. Many thanks to Julian Anastasov for reviewing this patch, and Cong Wang for his efforts on this problem. Fixes: 2860583fe840 ("ipv4: Kill rt->fi") Signed-off-by: Eric Dumazet Reported-by: Andrey Konovalov Reviewed-by: Julian Anastasov Acked-by: Cong Wang Signed-off-by: David S.
Miller --- include/net/dst.h | 8 +++++++- include/net/ip_fib.h | 10 +++++----- net/core/dst.c | 23 ++++++++++++++--------- net/ipv4/fib_semantics.c | 17 ++++++++++------- net/ipv4/route.c | 10 +++++++++- 5 files changed, 45 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/net/dst.h b/include/net/dst.h index 049af33da3b6..cfc043784166 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -107,10 +107,16 @@ struct dst_entry { }; }; +struct dst_metrics { + u32 metrics[RTAX_MAX]; + atomic_t refcnt; +}; +extern const struct dst_metrics dst_default_metrics; + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); -extern const u32 dst_default_metrics[]; #define DST_METRICS_READ_ONLY 0x1UL +#define DST_METRICS_REFCOUNTED 0x2UL #define DST_METRICS_FLAGS 0x3UL #define __DST_METRICS_PTR(Y) \ ((u32 *)((Y) & ~DST_METRICS_FLAGS)) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 6692c5758b33..f7f6aa789c61 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -114,11 +114,11 @@ struct fib_info { __be32 fib_prefsrc; u32 fib_tb_id; u32 fib_priority; - u32 *fib_metrics; -#define fib_mtu fib_metrics[RTAX_MTU-1] -#define fib_window fib_metrics[RTAX_WINDOW-1] -#define fib_rtt fib_metrics[RTAX_RTT-1] -#define fib_advmss fib_metrics[RTAX_ADVMSS-1] + struct dst_metrics *fib_metrics; +#define fib_mtu fib_metrics->metrics[RTAX_MTU-1] +#define fib_window fib_metrics->metrics[RTAX_WINDOW-1] +#define fib_rtt fib_metrics->metrics[RTAX_RTT-1] +#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] int fib_nhs; #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_weight; diff --git a/net/core/dst.c b/net/core/dst.c index 960e503b5a52..6192f11beec9 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(dst_discard_out); -const u32 dst_default_metrics[RTAX_MAX + 1] = { +const struct dst_metrics dst_default_metrics = { /* This initializer is needed to force linker to place this variable * into const section. Otherwise it might end into bss section. * We really want to avoid false sharing on this variable, and catch * any writes on it. 
*/ - [RTAX_MAX] = 0xdeadbeef, + .refcnt = ATOMIC_INIT(1), }; void dst_init(struct dst_entry *dst, struct dst_ops *ops, @@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, if (dev) dev_hold(dev); dst->ops = ops; - dst_init_metrics(dst, dst_default_metrics, true); + dst_init_metrics(dst, dst_default_metrics.metrics, true); dst->expires = 0UL; dst->path = dst; dst->from = NULL; @@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release); u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) { - u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); + struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC); if (p) { - u32 *old_p = __DST_METRICS_PTR(old); + struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); unsigned long prev, new; - memcpy(p, old_p, sizeof(u32) * RTAX_MAX); + atomic_set(&p->refcnt, 1); + memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); new = (unsigned long) p; prev = cmpxchg(&dst->_metrics, old, new); if (prev != old) { kfree(p); - p = __DST_METRICS_PTR(prev); + p = (struct dst_metrics *)__DST_METRICS_PTR(prev); if (prev & DST_METRICS_READ_ONLY) p = NULL; + } else if (prev & DST_METRICS_REFCOUNTED) { + if (atomic_dec_and_test(&old_p->refcnt)) + kfree(old_p); } } - return p; + BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0); + return (u32 *)p; } EXPORT_SYMBOL(dst_cow_metrics_generic); @@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) { unsigned long prev, new; - new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; + new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY; prev = cmpxchg(&dst->_metrics, old, new); if (prev == old) kfree(__DST_METRICS_PTR(old)); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index da449ddb8cc1..ad9ad4aab5da 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) static void free_fib_info_rcu(struct rcu_head *head) { struct fib_info *fi = container_of(head, struct fib_info, rcu); + struct dst_metrics *m; change_nexthops(fi) { if (nexthop_nh->nh_dev) @@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head) rt_fibinfo_free(&nexthop_nh->nh_rth_input); } endfor_nexthops(fi); - if (fi->fib_metrics != (u32 *) dst_default_metrics) - kfree(fi->fib_metrics); + m = fi->fib_metrics; + if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt)) + kfree(m); kfree(fi); } @@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) val = 255; if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) return -EINVAL; - fi->fib_metrics[type - 1] = val; + fi->fib_metrics->metrics[type - 1] = val; } if (ecn_ca) - fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; + fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; return 0; } @@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg) goto failure; fib_info_cnt++; if (cfg->fc_mx) { - fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); if (!fi->fib_metrics) goto failure; + atomic_set(&fi->fib_metrics->refcnt, 1); } else - fi->fib_metrics = (u32 *) dst_default_metrics; + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; @@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, if 
(fi->fib_priority && nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) goto nla_put_failure; - if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) + if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0) goto nla_put_failure; if (fi->fib_prefsrc && diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 655d9eebe43e..6883b3d4ba8f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt) static void ipv4_dst_destroy(struct dst_entry *dst) { + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rtable *rt = (struct rtable *) dst; + if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt)) + kfree(p); + if (!list_empty(&rt->rt_uncached)) { struct uncached_list *ul = rt->rt_uncached_list; @@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, rt->rt_gateway = nh->nh_gw; rt->rt_uses_gateway = 1; } - dst_init_metrics(&rt->dst, fi->fib_metrics, true); + dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); + if (fi->fib_metrics != &dst_default_metrics) { + rt->dst._metrics |= DST_METRICS_REFCOUNTED; + atomic_inc(&fi->fib_metrics->refcnt); + } #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; #endif -- cgit v1.2.3 From 1f51445af35e8477027d87ca015a10257b13f5a2 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 26 May 2017 08:37:23 +0200 Subject: bridge: Export VLAN filtering state It's useful for drivers supporting bridge offload to be able to query the bridge's VLAN filtering state. Currently, upon enslavement to a bridge master, the offloading driver will only learn about the bridge's VLAN filtering state after the bridge device was already linked with its slave. Being able to query the bridge's VLAN filtering state allows such drivers to forbid enslavement in case resource couldn't be allocated for a VLAN-aware bridge and also choose the correct initialization routine for the enslaved port, which is dependent on the bridge type. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 9 +++++++++ net/bridge/br_if.c | 2 +- net/bridge/br_mdb.c | 4 ++-- net/bridge/br_netlink.c | 2 +- net/bridge/br_private.h | 9 --------- net/bridge/br_vlan.c | 8 ++++++++ 6 files changed, 21 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 0c16866a7aac..d6cd103eb165 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -80,4 +80,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, } #endif +#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) +bool br_vlan_enabled(const struct net_device *dev); +#else +static inline bool br_vlan_enabled(const struct net_device *dev) +{ + return false; +} +#endif + #endif diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 7f8d05cf9065..f3aef22931ab 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -138,7 +138,7 @@ void br_manage_promisc(struct net_bridge *br) /* If vlan filtering is disabled or bridge interface is placed * into promiscuous mode, place all ports in promiscuous mode. 
*/ - if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br)) + if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev)) set_all = true; list_for_each_entry(p, &br->port_list, list) { diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index b0845480a3ae..09dcdb9c0f3c 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -599,7 +599,7 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; vg = nbp_vlan_group(p); - if (br_vlan_enabled(br) && vg && entry->vid == 0) { + if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) { list_for_each_entry(v, &vg->vlan_list, vlist) { entry->vid = v->vid; err = __br_mdb_add(net, br, entry); @@ -694,7 +694,7 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; vg = nbp_vlan_group(p); - if (br_vlan_enabled(br) && vg && entry->vid == 0) { + if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) { list_for_each_entry(v, &vg->vlan_list, vlist) { entry->vid = v->vid; err = __br_mdb_del(br, entry); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 574f78824d8a..1e63ec466d7c 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1251,7 +1251,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) u32 ageing_time = jiffies_to_clock_t(br->ageing_time); u32 stp_enabled = br->stp_enabled; u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]; - u8 vlan_enabled = br_vlan_enabled(br); + u8 vlan_enabled = br_vlan_enabled(br->dev); u64 clockval; clockval = br_timer_value(&br->hello_timer); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 0d177280aa84..20626927f433 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -854,10 +854,6 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg) return vg->pvid; } -static inline int br_vlan_enabled(struct net_bridge *br) -{ - return br->vlan_enabled; -} #else static inline bool br_allowed_ingress(const struct net_bridge *br, struct net_bridge_vlan_group *vg, @@ -945,11 +941,6 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg) return 0; } -static inline int br_vlan_enabled(struct net_bridge *br) -{ - return 0; -} - static inline int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) { diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index b838213c408e..26a1a56639b2 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -706,6 +706,14 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) return __br_vlan_filter_toggle(br, val); } +bool br_vlan_enabled(const struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + + return !!br->vlan_enabled; +} +EXPORT_SYMBOL_GPL(br_vlan_enabled); + int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) { int err = 0; -- cgit v1.2.3 From 9341b988e606f951df57d15569a425c6c74b945e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 26 May 2017 08:37:24 +0200 Subject: bridge: Export multicast enabled state During enslavement to a bridge, after the CHANGEUPPER is sent, the multicast enabled state of the bridge isn't propagated down to the offloading driver unless it's changed. This patch allows such drivers to query the multicast enabled state from the bridge, so that they'll be able to correctly configure their flood tables during port enslavement. In case multicast is disabled, unregistered multicast packets can be treated as broadcast and be flooded through all the bridge ports. 
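A sketch of how an offloading driver might consume the two new accessors at enslavement time follows. Every "example_*" name is hypothetical (a stand-in for a driver's private API); only br_vlan_enabled() and br_multicast_enabled() come from these patches:

/* Hypothetical switchdev driver code, assuming <linux/if_bridge.h>. */
struct example_port;	/* driver-private port state, hypothetical */
static bool example_port_supports_vlan(struct example_port *port);	/* hypothetical */
static int example_port_set_mc_flood(struct example_port *port, bool flood);	/* hypothetical */

static int example_port_bridge_join(struct example_port *port,
				    struct net_device *br_dev)
{
	/* Refuse enslavement when a VLAN-aware bridge cannot be offloaded. */
	if (br_vlan_enabled(br_dev) && !example_port_supports_vlan(port))
		return -EOPNOTSUPP;

	/* With multicast snooping disabled, unregistered multicast is
	 * treated as broadcast and must be flooded to all ports.
	 */
	return example_port_set_mc_flood(port, !br_multicast_enabled(br_dev));
}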
Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 5 +++++ net/bridge/br_multicast.c | 8 ++++++++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index d6cd103eb165..3cd18ac0697f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -62,6 +62,7 @@ int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); +bool br_multicast_enabled(const struct net_device *dev); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -78,6 +79,10 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, { return false; } +static inline bool br_multicast_enabled(const struct net_device *dev) +{ + return false; +} #endif #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index faa7261a992f..8dc5c8d69bcd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -2176,6 +2176,14 @@ unlock: return err; } +bool br_multicast_enabled(const struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + + return !br->multicast_disabled; +} +EXPORT_SYMBOL_GPL(br_multicast_enabled); + int br_multicast_set_querier(struct net_bridge *br, unsigned long val) { unsigned long max_delay; -- cgit v1.2.3 From 3d3ea5af5c0b382bc9d9aed378fd814fb5d4a011 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Sat, 27 May 2017 10:14:34 -0400 Subject: rtnl: Add support for netdev event to link messages When netdev events happen, the rtnetlink_event() handler will send messages for every event in its whitelist. These messages contain current information about a particular device, but they do not include the information about which event just happened. So, it is impossible to tell what just happened for these events. This patch adds a new extension to the RTM_NEWLINK message, called IFLA_EVENT, that encodes the event that triggered this message. This would allow the message consumer to easily determine if it needs to perform certain actions. Signed-off-by: Vladislav Yasevich Acked-by: David Ahern Signed-off-by: David S.
Miller --- include/linux/rtnetlink.h | 3 +- include/uapi/linux/if_link.h | 11 ++++++++ net/core/dev.c | 2 +- net/core/rtnetlink.c | 65 ++++++++++++++++++++++++++++++++++++++------ 4 files changed, 70 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 57e54847b0b9..dea59c8eec54 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -18,7 +18,8 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, - unsigned change, gfp_t flags); + unsigned change, u32 event, + gfp_t flags); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 15ac20382aba..8ed679fe603f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -157,6 +157,7 @@ enum { IFLA_GSO_MAX_SIZE, IFLA_PAD, IFLA_XDP, + IFLA_EVENT, __IFLA_MAX }; @@ -911,4 +912,14 @@ enum { #define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1) +enum { + IFLA_EVENT_NONE, + IFLA_EVENT_REBOOT, /* internal reset / reboot */ + IFLA_EVENT_FEATURES, /* change in offload features */ + IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */ + IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. arp/ndisc */ + IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */ + IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ +}; + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/net/core/dev.c b/net/core/dev.c index 3d98fbf4cbb0..06e0a7492df8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7084,7 +7084,7 @@ static void rollback_registered_many(struct list_head *head) if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, + skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, GFP_KERNEL); /* diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 64953af4a3b1..9da53e43750c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -941,6 +941,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ + rtnl_xdp_size() /* IFLA_XDP */ + + nla_total_size(4) /* IFLA_EVENT */ + nla_total_size(1); /* IFLA_PROTO_DOWN */ } @@ -1282,9 +1283,40 @@ err_cancel: return err; } +static u32 rtnl_get_event(unsigned long event) +{ + u32 rtnl_event_type = IFLA_EVENT_NONE; + + switch (event) { + case NETDEV_REBOOT: + rtnl_event_type = IFLA_EVENT_REBOOT; + break; + case NETDEV_FEAT_CHANGE: + rtnl_event_type = IFLA_EVENT_FEATURES; + break; + case NETDEV_BONDING_FAILOVER: + rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER; + break; + case NETDEV_NOTIFY_PEERS: + rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS; + break; + case NETDEV_RESEND_IGMP: + rtnl_event_type = IFLA_EVENT_IGMP_RESEND; + break; + case NETDEV_CHANGEINFODATA: + rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS; + break; + default: + break; + } + + return rtnl_event_type; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, - unsigned int flags, u32 ext_filter_mask) + unsigned int flags, u32 ext_filter_mask, + u32 event) { struct ifinfomsg *ifm; struct nlmsghdr *nlh; @@ -1333,6 +1365,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, nla_put_u8(skb, 
IFLA_PROTO_DOWN, dev->proto_down)) goto nla_put_failure; + if (event != IFLA_EVENT_NONE) { + if (nla_put_u32(skb, IFLA_EVENT, event)) + goto nla_put_failure; + } + if (rtnl_fill_link_ifmap(skb, dev)) goto nla_put_failure; @@ -1467,6 +1504,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, [IFLA_XDP] = { .type = NLA_NESTED }, + [IFLA_EVENT] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -1626,7 +1664,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, flags, - ext_filter_mask); + ext_filter_mask, 0); if (err < 0) { if (likely(skb->len)) @@ -2736,7 +2774,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, return -ENOBUFS; err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid, - nlh->nlmsg_seq, 0, 0, ext_filter_mask); + nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size */ WARN_ON(err == -EMSGSIZE); @@ -2808,7 +2846,8 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) } struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, - unsigned int change, gfp_t flags) + unsigned int change, + u32 event, gfp_t flags) { struct net *net = dev_net(dev); struct sk_buff *skb; @@ -2819,7 +2858,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, if (skb == NULL) goto errout; - err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0); + err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -2840,18 +2879,25 @@ void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags) rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); } -void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, - gfp_t flags) +static void rtmsg_ifinfo_event(int type, struct net_device *dev, + unsigned int change, u32 event, + gfp_t flags) { struct sk_buff *skb; if (dev->reg_state != NETREG_REGISTERED) return; - skb = rtmsg_ifinfo_build_skb(type, dev, change, flags); + skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags); if (skb) rtmsg_ifinfo_send(skb, dev, flags); } + +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, + gfp_t flags) +{ + rtmsg_ifinfo_event(type, dev, change, IFLA_EVENT_NONE, flags); +} EXPORT_SYMBOL(rtmsg_ifinfo); static int nlmsg_populate_fdb_fill(struct sk_buff *skb, @@ -4168,7 +4214,8 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi case NETDEV_NOTIFY_PEERS: case NETDEV_RESEND_IGMP: case NETDEV_CHANGEINFODATA: - rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); + rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), + GFP_KERNEL); break; default: break; -- cgit v1.2.3 From 7a7e96e09d463c7c3d51a51c539ae4352085ed18 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Sat, 27 May 2017 10:14:35 -0400 Subject: bonding: Prevent duplicate userspace notification Whenever a user changes bonding options, a NETDEV_CHANGEINFODATA notification is generated, which results in a rtnetlink message being sent. While running 'ip monitor', we can actually see 2 messages, one a result of the event, and the other a result of the state change that is generated by netdev_state_change(). However, this is not always the case.
If bonding changes were done via sysfs or ifenslave (old ioctl interface), then only 1 message is seen. This patch removes duplicate messages in the case of using netlink to configure bonding. It introduces a separate function that triggers a netdev event and uses that function in the sysfs and ioctl cases. This was discovered while auditing all the different events and continues the effort of cleaning up duplicated netlink messages. CC: David Ahern CC: Jiri Pirko Signed-off-by: Vladislav Yasevich Acked-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 3 ++- drivers/net/bonding/bond_options.c | 27 +++++++++++++++++++++++++-- include/net/bond_options.h | 2 ++ 3 files changed, 29 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2359478b977f..d4484d1a8164 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3488,7 +3488,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd case BOND_CHANGE_ACTIVE_OLD: case SIOCBONDCHANGEACTIVE: bond_opt_initstr(&newval, slave_dev->name); - res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval); + res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, + &newval); break; default: res = -EOPNOTSUPP; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 1bcbb8913e17..8ca683396fcc 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -673,7 +673,30 @@ int __bond_opt_set(struct bonding *bond, out: if (ret) bond_opt_error_interpret(bond, opt, ret, val); - else if (bond->dev->reg_state == NETREG_REGISTERED) + + return ret; +} +/** + * __bond_opt_set_notify - set a bonding option + * @bond: target bond device + * @option: option to set + * @val: value to set it to + * + * This function is used to change the bond's option value and trigger + * a notification to user space. It can be used for both enabling/changing + * an option and for disabling it. RTNL lock must be obtained before calling + * this function.
+ */ +int __bond_opt_set_notify(struct bonding *bond, + unsigned int option, struct bond_opt_value *val) +{ + int ret = -ENOENT; + + ASSERT_RTNL(); + + ret = __bond_opt_set(bond, option, val); + + if (!ret && (bond->dev->reg_state == NETREG_REGISTERED)) call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev); return ret; @@ -696,7 +719,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf) if (!rtnl_trylock()) return restart_syscall(); bond_opt_initstr(&optval, buf); - ret = __bond_opt_set(bond, option, &optval); + ret = __bond_opt_set_notify(bond, option, &optval); rtnl_unlock(); return ret; diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 1797235cd590..d79d28f5318c 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -104,6 +104,8 @@ struct bond_option { int __bond_opt_set(struct bonding *bond, unsigned int option, struct bond_opt_value *val); +int __bond_opt_set_notify(struct bonding *bond, unsigned int option, + struct bond_opt_value *val); int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, -- cgit v1.2.3 From a3995460491d4570af8e99ad34ddf6d1948254d9 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 27 May 2017 10:42:25 -0700 Subject: net: phy: Relax error checking on sysfs_create_link() Some Ethernet drivers will attach/connect to a PHY device before calling register_netdevice(), which is responsible for calling netdev_register_kobject(), which does the network device's kobject initialization. In such a case, sysfs_create_link() would return -ENOENT because the network device's kobject is not ready yet, and we would fail to connect to the PHY device. In order to keep things simple and symmetrical, we just take the success path as indicative of the ability to access the network device's kobject, and create the second link if that's the case. Fixes: 5568363f0cb3 ("net: phy: Create sysfs reciprocal links for attached_dev/phydev") Reported-by: Woojung Hung Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 30 ++++++++++++++++++++++-------- include/linux/phy.h | 2 ++ 2 files changed, 24 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f84414b8f2ee..37a1e98908e3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -960,15 +960,27 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phydev->attached_dev = dev; dev->phydev = phydev; + + /* Some Ethernet drivers try to connect to a PHY device before + * calling register_netdevice() -> netdev_register_kobject() which + * does the dev->dev.kobj initialization. Here we only check for + * success which indicates that the network device kobject is + * ready. Once we do that we still need to keep track of whether + * links were successfully set up or not for phy_detach() to + * remove them accordingly.
+ */ + phydev->sysfs_links = false; + err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, "attached_dev"); - if (err) - goto error; + if (!err) { + err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, + "phydev"); + if (err) + goto error; - err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, - "phydev"); - if (err) - goto error; + phydev->sysfs_links = true; + } phydev->dev_flags = flags; @@ -1059,8 +1071,10 @@ void phy_detach(struct phy_device *phydev) struct mii_bus *bus; int i; - sysfs_remove_link(&dev->dev.kobj, "phydev"); - sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); + if (phydev->sysfs_links) { + sysfs_remove_link(&dev->dev.kobj, "phydev"); + sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); + } phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_suspend(phydev); diff --git a/include/linux/phy.h b/include/linux/phy.h index 5a808a26e4cf..58f1b45a4c44 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -363,6 +363,7 @@ struct phy_c45_device_ids { * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc. * has_fixups: Set to true if this phy has fixups/quirks. * suspended: Set to true if this phy has been suspended successfully. + * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * link_timeout: The number of timer firings to wait before the @@ -399,6 +400,7 @@ struct phy_device { bool is_pseudo_fixed_link; bool has_fixups; bool suspended; + bool sysfs_links; enum phy_state state; -- cgit v1.2.3 From 118b90f3f18e733c99f0e8b98ea31a815ffc4d14 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 18 May 2017 14:10:22 +0300 Subject: drm/dp: add helper for reading DP sink/branch device desc from DPCD Reviewed-by: Daniel Vetter Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/acba54da7d80eafea9e59a893e27e3c31028c0ba.1495105635.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_dp_helper.c | 35 +++++++++++++++++++++++++++++++++++ include/drm/drm_dp_helper.h | 19 +++++++++++++++++++ 2 files changed, 54 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 3e5f52110ea1..52e0ca9a5bb1 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1208,3 +1208,38 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux) return 0; } EXPORT_SYMBOL(drm_dp_stop_crc); + +/** + * drm_dp_read_desc - read sink/branch descriptor from DPCD + * @aux: DisplayPort AUX channel + * @desc: Device descriptor to fill from DPCD + * @is_branch: true for branch devices, false for sink devices + * + * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the + * identification. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch) +{ + struct drm_dp_dpcd_ident *ident = &desc->ident; + unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI; + int ret, dev_id_len; + + ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); + if (ret < 0) + return ret; + + dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); + + DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n", + is_branch ?
"branch" : "sink", + (int)sizeof(ident->oui), ident->oui, + dev_id_len, ident->device_id, + ident->hw_rev >> 4, ident->hw_rev & 0xf, + ident->sw_major_rev, ident->sw_minor_rev); + + return 0; +} +EXPORT_SYMBOL(drm_dp_read_desc); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index c0bd0d7651a9..84502da177a1 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -913,4 +913,23 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux); int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); int drm_dp_stop_crc(struct drm_dp_aux *aux); +struct drm_dp_dpcd_ident { + u8 oui[3]; + u8 device_id[6]; + u8 hw_rev; + u8 sw_major_rev; + u8 sw_minor_rev; +} __packed; + +/** + * struct drm_dp_desc - DP branch/sink device descriptor + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + */ +struct drm_dp_desc { + struct drm_dp_dpcd_ident ident; +}; + +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch); + #endif /* _DRM_DP_HELPER_H_ */ -- cgit v1.2.3 From 76fa998acd86b6b40d0217e12af39c2406bdcd2b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 18 May 2017 14:10:24 +0300 Subject: drm/dp: start a DPCD based DP sink/branch device quirk database MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Face the fact, there are Display Port sink and branch devices out there in the wild that don't follow the Display Port specifications, or they have bugs, or just otherwise require special treatment. Start a common quirk database the drivers can query based on the DP device identification. At least for now, we leave the workarounds for the drivers to implement as they see fit. For starters, add a branch device that can't handle full 24-bit main link Mdiv and Ndiv main link attributes properly. Naturally, the workaround of reducing main link attributes for all devices ended up in regressions for other devices. So here we are. v2: Rebase on DRM DP desc read helpers v3: Fix the OUI memcmp blunder (Clint) Cc: Ville Syrjälä Cc: Dhinakaran Pandiyan Cc: Clint Taylor Cc: Adam Jackson Cc: Harry Wentland Tested-by: Clinton Taylor Reviewed-by: Clinton Taylor Reviewed-by: Daniel Vetter # v2 Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/91ec198dd95258dbf3bee2f6be739e0da73b4fdd.1495105635.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_dp_helper.c | 52 +++++++++++++++++++++++++++++++++++++++-- include/drm/drm_dp_helper.h | 32 +++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 52e0ca9a5bb1..213fb837e1c4 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1209,6 +1209,51 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux) } EXPORT_SYMBOL(drm_dp_stop_crc); +struct dpcd_quirk { + u8 oui[3]; + bool is_branch; + u32 quirks; +}; + +#define OUI(first, second, third) { (first), (second), (third) } + +static const struct dpcd_quirk dpcd_quirk_list[] = { + /* Analogix 7737 needs reduced M and N at HBR2 link rates */ + { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) }, +}; + +#undef OUI + +/* + * Get a bit mask of DPCD quirks for the sink/branch device identified by + * ident. The quirk data is shared but it's up to the drivers to act on the + * data. 
+ * + * For now, only the OUI (first three bytes) is used, but this may be extended + * to device identification string and hardware/firmware revisions later. + */ +static u32 +drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) +{ + const struct dpcd_quirk *quirk; + u32 quirks = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) { + quirk = &dpcd_quirk_list[i]; + + if (quirk->is_branch != is_branch) + continue; + + if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0) + continue; + + quirks |= quirk->quirks; + } + + return quirks; +} + /** * drm_dp_read_desc - read sink/branch descriptor from DPCD * @aux: DisplayPort AUX channel @@ -1231,14 +1276,17 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, if (ret < 0) return ret; + desc->quirks = drm_dp_get_quirks(ident, is_branch); + dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); - DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n", + DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", is_branch ? "branch" : "sink", (int)sizeof(ident->oui), ident->oui, dev_id_len, ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf, - ident->sw_major_rev, ident->sw_minor_rev); + ident->sw_major_rev, ident->sw_minor_rev, + desc->quirks); return 0; } diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 84502da177a1..bb837310c07e 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -924,12 +924,44 @@ struct drm_dp_dpcd_ident { /** * struct drm_dp_desc - DP branch/sink device descriptor * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. */ struct drm_dp_desc { struct drm_dp_dpcd_ident ident; + u32 quirks; }; int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, bool is_branch); +/** + * enum drm_dp_quirk - Display Port sink/branch device specific quirks + * + * Display Port sink and branch devices in the wild have a variety of bugs, try + * to collect them here. The quirks are shared, but it's up to the drivers to + * implement workarounds for them. + */ +enum drm_dp_quirk { + /** + * @DP_DPCD_QUIRK_LIMITED_M_N: + * + * The device requires main link attributes Mvid and Nvid to be limited + * to 16 bits. + */ + DP_DPCD_QUIRK_LIMITED_M_N, +}; + +/** + * drm_dp_has_quirk() - does the DP device have a specific quirk + * @desc: Device descriptor filled by drm_dp_read_desc() + * @quirk: Quirk to query for + * + * Return true if DP device identified by @desc has @quirk. + */ +static inline bool +drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) +{ + return desc->quirks & BIT(quirk); +} + #endif /* _DRM_DP_HELPER_H_ */ -- cgit v1.2.3 From 78055998954b7a3e6c31eb24d1d26f0b63a7ec0d Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 27 May 2017 16:19:26 -0600 Subject: net: ipv4: Add extack message for invalid prefix or length Add extack error message for invalid prefix length and invalid prefix. Example of the latter is a route spec containing 172.16.100.1/24, where the /24 mask means the lower 8 bits should be 0. Amazing how easy that one is to overlook when an EINVAL is returned. Signed-off-by: David Ahern Signed-off-by: David S.
Miller --- include/net/ip_fib.h | 3 ++- net/ipv4/fib_frontend.c | 7 ++++--- net/ipv4/fib_trie.c | 17 +++++++++++------ 3 files changed, 17 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index dcbfd5dfd25e..3dbfd5e6a347 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -266,7 +266,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags); int fib_table_insert(struct net *, struct fib_table *, struct fib_config *, struct netlink_ext_ack *extack); -int fib_table_delete(struct net *, struct fib_table *, struct fib_config *); +int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, + struct netlink_ext_ack *extack); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); int fib_table_flush(struct net *net, struct fib_table *table); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 14d2f7bd7c76..715b7967d8ea 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -588,7 +588,8 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg) if (cmd == SIOCDELRT) { tb = fib_get_table(net, cfg.fc_table); if (tb) - err = fib_table_delete(net, tb, &cfg); + err = fib_table_delete(net, tb, &cfg, + NULL); else err = -ESRCH; } else { @@ -732,7 +733,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } - err = fib_table_delete(net, tb, &cfg); + err = fib_table_delete(net, tb, &cfg, extack); errout: return err; } @@ -851,7 +852,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad if (cmd == RTM_NEWROUTE) fib_table_insert(net, tb, &cfg, NULL); else - fib_table_delete(net, tb, &cfg); + fib_table_delete(net, tb, &cfg, NULL); } void fib_add_ifaddr(struct in_ifaddr *ifa) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 9bd46e1e1037..a624d380c81d 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1099,13 +1099,18 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, return 0; } -static bool fib_valid_key_len(u32 key, u8 plen) +static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack) { - if (plen > KEYLENGTH) + if (plen > KEYLENGTH) { + NL_SET_ERR_MSG(extack, "Invalid prefix length"); return false; + } - if ((plen < KEYLENGTH) && (key << plen)) + if ((plen < KEYLENGTH) && (key << plen)) { + NL_SET_ERR_MSG(extack, + "Invalid prefix for given prefix length"); return false; + } return true; } @@ -1128,7 +1133,7 @@ int fib_table_insert(struct net *net, struct fib_table *tb, key = ntohl(cfg->fc_dst); - if (!fib_valid_key_len(key, plen)) + if (!fib_valid_key_len(key, plen, extack)) return -EINVAL; pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); @@ -1516,7 +1521,7 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp, /* Caller must hold RTNL. 
*/ int fib_table_delete(struct net *net, struct fib_table *tb, - struct fib_config *cfg) + struct fib_config *cfg, struct netlink_ext_ack *extack) { struct trie *t = (struct trie *) tb->tb_data; struct fib_alias *fa, *fa_to_delete; @@ -1528,7 +1533,7 @@ int fib_table_delete(struct net *net, struct fib_table *tb, key = ntohl(cfg->fc_dst); - if (!fib_valid_key_len(key, plen)) + if (!fib_valid_key_len(key, plen, extack)) return -EINVAL; l = fib_find_node(t, &tp, key); -- cgit v1.2.3 From c255bd681d1a93fff2a2c249d91449cce830ac64 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 27 May 2017 16:19:27 -0600 Subject: net: lwtunnel: Add extack to encap attr validation Pass extack down to lwtunnel_valid_encap_type and lwtunnel_valid_encap_type_attr. Add messages for unknown or unsupported encap types. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/net/lwtunnel.h | 13 +++++++++---- net/core/lwtunnel.c | 18 +++++++++++++----- net/ipv4/fib_frontend.c | 6 ++++-- net/ipv6/route.c | 4 ++-- 4 files changed, 28 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index ebfe237aad7e..ca6f002774ef 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -107,8 +107,10 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, unsigned int num); -int lwtunnel_valid_encap_type(u16 encap_type); -int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len); +int lwtunnel_valid_encap_type(u16 encap_type, + struct netlink_ext_ack *extack); +int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, + struct netlink_ext_ack *extack); int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, @@ -172,11 +174,14 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, return -EOPNOTSUPP; } -static inline int lwtunnel_valid_encap_type(u16 encap_type) +static inline int lwtunnel_valid_encap_type(u16 encap_type, + struct netlink_ext_ack *extack) { + NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel"); return -EOPNOTSUPP; } -static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) +static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, + struct netlink_ext_ack *extack) { /* return 0 since we are not walking attr looking for * RTA_ENCAP_TYPE attribute on nexthops. diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index cfae3d5fe11f..ab840386a74d 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -126,14 +126,16 @@ int lwtunnel_build_state(u16 encap_type, } EXPORT_SYMBOL(lwtunnel_build_state); -int lwtunnel_valid_encap_type(u16 encap_type) +int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) { const struct lwtunnel_encap_ops *ops; int ret = -EINVAL; if (encap_type == LWTUNNEL_ENCAP_NONE || - encap_type > LWTUNNEL_ENCAP_MAX) + encap_type > LWTUNNEL_ENCAP_MAX) { + NL_SET_ERR_MSG(extack, "Unknown lwt encapsulation type"); return ret; + } rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[encap_type]); @@ -153,11 +155,16 @@ int lwtunnel_valid_encap_type(u16 encap_type) } } #endif - return ops ? 0 : -EOPNOTSUPP; + ret = ops ? 
0 : -EOPNOTSUPP; + if (ret < 0) + NL_SET_ERR_MSG(extack, "lwt encapsulation type not supported"); + + return ret; } EXPORT_SYMBOL(lwtunnel_valid_encap_type); -int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining) +int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, + struct netlink_ext_ack *extack) { struct rtnexthop *rtnh = (struct rtnexthop *)attr; struct nlattr *nla_entype; @@ -174,7 +181,8 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining) if (nla_entype) { encap_type = nla_get_u16(nla_entype); - if (lwtunnel_valid_encap_type(encap_type) != 0) + if (lwtunnel_valid_encap_type(encap_type, + extack) != 0) return -EOPNOTSUPP; } } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 715b7967d8ea..4e678fa892dd 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -685,7 +685,8 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, break; case RTA_MULTIPATH: err = lwtunnel_valid_encap_type_attr(nla_data(attr), - nla_len(attr)); + nla_len(attr), + extack); if (err < 0) goto errout; cfg->fc_mp = nla_data(attr); @@ -702,7 +703,8 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, break; case RTA_ENCAP_TYPE: cfg->fc_encap_type = nla_get_u16(attr); - err = lwtunnel_valid_encap_type(cfg->fc_encap_type); + err = lwtunnel_valid_encap_type(cfg->fc_encap_type, + extack); if (err < 0) goto errout; break; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2fe84bdc4e60..524a76b5206e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3016,7 +3016,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, - cfg->fc_mp_len); + cfg->fc_mp_len, extack); if (err < 0) goto errout; } @@ -3035,7 +3035,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[RTA_ENCAP_TYPE]) { cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); - err = lwtunnel_valid_encap_type(cfg->fc_encap_type); + err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); if (err < 0) goto errout; } -- cgit v1.2.3 From 9ae287274817c032a4428fde84d1ab26d6b96761 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 27 May 2017 16:19:28 -0600 Subject: net: add extack arg to lwtunnel build state Pass extack arg down to lwtunnel_build_state and the build_state callbacks. Add messages for failures in lwtunnel_build_state, and add the extack arg to nla_parse where possible in the build_state callbacks. Signed-off-by: David Ahern Signed-off-by: David S.
Miller --- include/linux/netlink.h | 10 ++++++++++ include/net/lwtunnel.h | 9 ++++++--- net/core/lwt_bpf.c | 5 +++-- net/core/lwtunnel.c | 20 +++++++++++++++++--- net/ipv4/fib_lookup.h | 3 ++- net/ipv4/fib_semantics.c | 20 +++++++++++--------- net/ipv4/fib_trie.c | 2 +- net/ipv4/ip_tunnel_core.c | 11 +++++++---- net/ipv6/ila/ila_lwt.c | 5 +++-- net/ipv6/route.c | 2 +- net/ipv6/seg6_iptunnel.c | 5 +++-- net/mpls/mpls_iptunnel.c | 5 +++-- 12 files changed, 67 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index a68aad484c69..8664fd26eb5d 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -102,6 +102,16 @@ struct netlink_ext_ack { (extack)->bad_attr = (attr); \ } while (0) +#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ + static const char __msg[] = (msg); \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (__extack) { \ + __extack->_msg = __msg; \ + __extack->bad_attr = (attr); \ + } \ +} while (0) + extern void netlink_kernel_release(struct sock *sk); extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index ca6f002774ef..7c26863b8cf4 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -35,7 +35,8 @@ struct lwtunnel_state { struct lwtunnel_encap_ops { int (*build_state)(struct nlattr *encap, unsigned int family, const void *cfg, - struct lwtunnel_state **ts); + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack); void (*destroy_state)(struct lwtunnel_state *lws); int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*input)(struct sk_buff *skb); @@ -114,7 +115,8 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, - struct lwtunnel_state **lws); + struct lwtunnel_state **lws, + struct netlink_ext_ack *extack); int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate); int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate); @@ -192,7 +194,8 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, static inline int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, - struct lwtunnel_state **lws) + struct lwtunnel_state **lws, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index b3bc0a31af9f..1307731ddfe4 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -240,7 +240,8 @@ static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = { static int bpf_build_state(struct nlattr *nla, unsigned int family, const void *cfg, - struct lwtunnel_state **ts) + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) { struct nlattr *tb[LWT_BPF_MAX + 1]; struct lwtunnel_state *newts; @@ -250,7 +251,7 @@ static int bpf_build_state(struct nlattr *nla, if (family != AF_INET && family != AF_INET6) return -EAFNOSUPPORT; - ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, NULL); + ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack); if (ret < 0) return ret; diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index ab840386a74d..d9cb3532f1dd 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -103,25 +103,39 @@ EXPORT_SYMBOL(lwtunnel_encap_del_ops); int lwtunnel_build_state(u16 
encap_type, struct nlattr *encap, unsigned int family, - const void *cfg, struct lwtunnel_state **lws) + const void *cfg, struct lwtunnel_state **lws, + struct netlink_ext_ack *extack) { const struct lwtunnel_encap_ops *ops; + bool found = false; int ret = -EINVAL; if (encap_type == LWTUNNEL_ENCAP_NONE || - encap_type > LWTUNNEL_ENCAP_MAX) + encap_type > LWTUNNEL_ENCAP_MAX) { + NL_SET_ERR_MSG_ATTR(extack, encap, + "Unknown LWT encapsulation type"); return ret; + } ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[encap_type]); if (likely(ops && ops->build_state && try_module_get(ops->owner))) { - ret = ops->build_state(encap, family, cfg, lws); + found = true; + ret = ops->build_state(encap, family, cfg, lws, extack); if (ret) module_put(ops->owner); } rcu_read_unlock(); + /* don't rely on -EOPNOTSUPP to detect match as build_state + * handlers could return it + */ + if (!found) { + NL_SET_ERR_MSG_ATTR(extack, encap, + "LWT encapsulation type not supported"); + } + return ret; } EXPORT_SYMBOL(lwtunnel_build_state); diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index 2704e08545da..769ab87ebc4b 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -30,7 +30,8 @@ static inline void fib_alias_accessed(struct fib_alias *fa) void fib_release_info(struct fib_info *); struct fib_info *fib_create_info(struct fib_config *cfg, struct netlink_ext_ack *extack); -int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); +int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, + struct netlink_ext_ack *extack); int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi, unsigned int); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index de9484658232..2157dc08c407 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -532,7 +532,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, ret = lwtunnel_build_state(nla_get_u16( nla_entype), nla, AF_INET, cfg, - &lwtstate); + &lwtstate, extack); if (ret) goto errout; nexthop_nh->nh_lwtstate = @@ -614,7 +614,8 @@ static inline void fib_add_weight(struct fib_info *fi, static int fib_encap_match(u16 encap_type, struct nlattr *encap, const struct fib_nh *nh, - const struct fib_config *cfg) + const struct fib_config *cfg, + struct netlink_ext_ack *extack) { struct lwtunnel_state *lwtstate; int ret, result = 0; @@ -622,8 +623,8 @@ static int fib_encap_match(u16 encap_type, if (encap_type == LWTUNNEL_ENCAP_NONE) return 0; - ret = lwtunnel_build_state(encap_type, encap, - AF_INET, cfg, &lwtstate); + ret = lwtunnel_build_state(encap_type, encap, AF_INET, + cfg, &lwtstate, extack); if (!ret) { result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate); lwtstate_free(lwtstate); @@ -632,7 +633,8 @@ static int fib_encap_match(u16 encap_type, return result; } -int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) +int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, + struct netlink_ext_ack *extack) { #ifdef CONFIG_IP_ROUTE_MULTIPATH struct rtnexthop *rtnh; @@ -644,9 +646,9 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) if (cfg->fc_oif || cfg->fc_gw) { if (cfg->fc_encap) { - if (fib_encap_match(cfg->fc_encap_type, - cfg->fc_encap, fi->fib_nh, cfg)) - return 1; + if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap, + fi->fib_nh, cfg, extack)) + return 1; } if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && (!cfg->fc_gw || cfg->fc_gw == 
fi->fib_nh->nh_gw)) @@ -1148,7 +1150,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, } err = lwtunnel_build_state(cfg->fc_encap_type, cfg->fc_encap, AF_INET, cfg, - &lwtstate); + &lwtstate, extack); if (err) goto failure; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index a624d380c81d..d56659e97a6e 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1562,7 +1562,7 @@ int fib_table_delete(struct net *net, struct fib_table *tb, fi->fib_prefsrc == cfg->fc_prefsrc) && (!cfg->fc_protocol || fi->fib_protocol == cfg->fc_protocol) && - fib_nh_match(cfg, fi) == 0) { + fib_nh_match(cfg, fi, extack) == 0) { fa_to_delete = fa; break; } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index baf196eaf1d8..90e11479c725 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -228,14 +228,16 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { static int ip_tun_build_state(struct nlattr *attr, unsigned int family, const void *cfg, - struct lwtunnel_state **ts) + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) { struct ip_tunnel_info *tun_info; struct lwtunnel_state *new_state; struct nlattr *tb[LWTUNNEL_IP_MAX + 1]; int err; - err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy, NULL); + err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy, + extack); if (err < 0) return err; @@ -325,7 +327,8 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { static int ip6_tun_build_state(struct nlattr *attr, unsigned int family, const void *cfg, - struct lwtunnel_state **ts) + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) { struct ip_tunnel_info *tun_info; struct lwtunnel_state *new_state; @@ -333,7 +336,7 @@ static int ip6_tun_build_state(struct nlattr *attr, int err; err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy, - NULL); + extack); if (err < 0) return err; diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index f4a413aba423..0c02a09bc351 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -117,7 +117,8 @@ static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { static int ila_build_state(struct nlattr *nla, unsigned int family, const void *cfg, - struct lwtunnel_state **ts) + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) { struct ila_lwt *ilwt; struct ila_params *p; @@ -146,7 +147,7 @@ static int ila_build_state(struct nlattr *nla, return -EINVAL; } - ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy, NULL); + ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy, extack); if (ret < 0) return ret; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 524a76b5206e..9d9b5bbea153 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1939,7 +1939,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, err = lwtunnel_build_state(cfg->fc_encap_type, cfg->fc_encap, AF_INET6, cfg, - &lwtstate); + &lwtstate, extack); if (err) goto out; rt->dst.lwtstate = lwtstate_get(lwtstate); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 6a495490d43e..264d772d3c7d 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -326,7 +326,8 @@ drop: static int seg6_build_state(struct nlattr *nla, unsigned int family, const void *cfg, - struct lwtunnel_state **ts) + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) { struct nlattr *tb[SEG6_IPTUNNEL_MAX + 1]; struct seg6_iptunnel_encap *tuninfo; @@ -336,7 +337,7 @@ static 
int seg6_build_state(struct nlattr *nla, int err; err = nla_parse_nested(tb, SEG6_IPTUNNEL_MAX, nla, - seg6_iptunnel_policy, NULL); + seg6_iptunnel_policy, extack); if (err < 0) return err; diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 369c7a23c86c..15e1aa708e50 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -159,7 +159,8 @@ drop: static int mpls_build_state(struct nlattr *nla, unsigned int family, const void *cfg, - struct lwtunnel_state **ts) + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) { struct mpls_iptunnel_encap *tun_encap_info; struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1]; @@ -168,7 +169,7 @@ static int mpls_build_state(struct nlattr *nla, int ret; ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla, - mpls_iptunnel_policy, NULL); + mpls_iptunnel_policy, extack); if (ret < 0) return ret; -- cgit v1.2.3 From 23c9ee4934e7a79b49151d0f05c24117d69c73fe Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 May 2017 18:12:51 -0400 Subject: net: dsa: remove dev arg of dsa_register_switch The current dsa_register_switch function takes a useless struct device pointer argument, which always equals ds->dev. Drivers either call it with ds->dev, or with the same device pointer passed to dsa_switch_alloc, which ends up being assigned to ds->dev. This patch removes the second argument of the dsa_register_switch and _dsa_register_switch functions. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 2 +- drivers/net/dsa/dsa_loop.c | 2 +- drivers/net/dsa/lan9303-core.c | 2 +- drivers/net/dsa/mt7530.c | 2 +- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- drivers/net/dsa/qca8k.c | 2 +- include/net/dsa.h | 2 +- net/dsa/dsa2.c | 10 +++++----- 8 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 6a5648a9cb09..e68d368e20ac 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1989,7 +1989,7 @@ int b53_switch_register(struct b53_device *dev) pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); - return dsa_register_switch(dev->ds, dev->ds->dev); + return dsa_register_switch(dev->ds); } EXPORT_SYMBOL(b53_switch_register); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 5edf07beb9d2..79e62593ff4e 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -271,7 +271,7 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev) dev_set_drvdata(&mdiodev->dev, ds); - return dsa_register_switch(ds, ds->dev); + return dsa_register_switch(ds); } static void dsa_loop_drv_remove(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index c8b2423c8ef7..cd76e61f1fca 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -802,7 +802,7 @@ static int lan9303_register_switch(struct lan9303 *chip) chip->ds->ops = &lan9303_switch_ops; chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 
0xe : 0x7; - return dsa_register_switch(chip->ds, chip->dev); + return dsa_register_switch(chip->ds); } static void lan9303_probe_reset_gpio(struct lan9303 *chip, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 4d2f45153ede..25e00d5e0eec 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1080,7 +1080,7 @@ mt7530_probe(struct mdio_device *mdiodev) mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); - return dsa_register_switch(priv->ds, &mdiodev->dev); + return dsa_register_switch(priv->ds); } static void diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 070e82ac6132..7cf470c3e662 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3884,7 +3884,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) dev_set_drvdata(dev, ds); - return dsa_register_switch(ds, dev); + return dsa_register_switch(ds); } static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 0f6a011d8ed1..b3bee7eab45f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -958,7 +958,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); - return dsa_register_switch(priv->ds, &mdiodev->dev); + return dsa_register_switch(priv->ds); } static void diff --git a/include/net/dsa.h b/include/net/dsa.h index c0e567c0c824..d9bd6939229a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -480,7 +480,7 @@ static inline bool netdev_uses_dsa(struct net_device *dev) struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n); void dsa_unregister_switch(struct dsa_switch *ds); -int dsa_register_switch(struct dsa_switch *ds, struct device *dev); +int dsa_register_switch(struct dsa_switch *ds); #ifdef CONFIG_PM_SLEEP int dsa_switch_suspend(struct dsa_switch *ds); int dsa_switch_resume(struct dsa_switch *ds); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 4301f52e4f5a..c0a4576db4a2 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -686,10 +686,10 @@ static struct device_node *dsa_get_ports(struct dsa_switch *ds, return ports; } -static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev) +static int _dsa_register_switch(struct dsa_switch *ds) { - struct dsa_chip_data *pdata = dev->platform_data; - struct device_node *np = dev->of_node; + struct dsa_chip_data *pdata = ds->dev->platform_data; + struct device_node *np = ds->dev->of_node; struct dsa_switch_tree *dst; struct device_node *ports; u32 tree, index; @@ -803,12 +803,12 @@ struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) } EXPORT_SYMBOL_GPL(dsa_switch_alloc); -int dsa_register_switch(struct dsa_switch *ds, struct device *dev) +int dsa_register_switch(struct dsa_switch *ds) { int err; mutex_lock(&dsa2_mutex); - err = _dsa_register_switch(ds, dev); + err = _dsa_register_switch(ds); mutex_unlock(&dsa2_mutex); return err; -- cgit v1.2.3 From d897a638e98c476c56118d0dcc1bc55450504866 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 31 May 2017 08:06:43 -0700 Subject: sched: add helper for updating statistics on all actions Forgetting to disable preemption around tcf_action_stats_update() seems to be a common mistake. Add a helper function for updating stats on all actions of a filter. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10 +--------- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 10 +--------- drivers/net/ethernet/netronome/nfp/nfp_net_offload.c | 11 ++--------- include/net/pkt_cls.h | 19 +++++++++++++++++++ 4 files changed, 23 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d2f90ba2dbc4..7914a32a3036 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1863,9 +1863,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, { struct mlx5e_tc_table *tc = &priv->fs.tc; struct mlx5e_tc_flow *flow; - struct tc_action *a; struct mlx5_fc *counter; - LIST_HEAD(actions); u64 bytes; u64 packets; u64 lastuse; @@ -1884,13 +1882,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); - preempt_disable(); - - tcf_exts_to_list(f->exts, &actions); - list_for_each_entry(a, &actions, list) - tcf_action_stats_update(a, bytes, packets, lastuse); - - preempt_enable(); + tcf_exts_stats_update(f->exts, bytes, packets, lastuse); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ed75c6a85bc3..13af8e358847 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -397,8 +397,6 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_rule *rule; - struct tc_action *a; - LIST_HEAD(actions); u64 packets; u64 lastuse; u64 bytes; @@ -419,13 +417,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, if (err) goto err_rule_get_stats; - preempt_disable(); - - tcf_exts_to_list(f->exts, &actions); - list_for_each_entry(a, &actions, list) - tcf_action_stats_update(a, bytes, packets, lastuse); - - preempt_enable(); + tcf_exts_stats_update(f->exts, bytes, packets, lastuse); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c index cc823df12c8a..2fa7b67d0c6f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c @@ -84,8 +84,6 @@ static void nfp_net_bpf_stats_reset(struct nfp_net *nn) static int nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) { - struct tc_action *a; - LIST_HEAD(actions); u64 bytes, pkts; pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts; @@ -94,13 +92,8 @@ nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) nn->rx_filter_prev = nn->rx_filter; - preempt_disable(); - - tcf_exts_to_list(cls_bpf->exts, &actions); - list_for_each_entry(a, &actions, list) - tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change); - - preempt_enable(); + tcf_exts_stats_update(cls_bpf->exts, + bytes, pkts, nn->rx_filter_change); return 0; } diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f7762295b7b8..537d0a0ad4c4 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -157,6 +157,25 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts, #endif } +static inline void +tcf_exts_stats_update(const struct tcf_exts *exts, + u64 bytes, 
u64 packets, u64 lastuse) +{ +#ifdef CONFIG_NET_CLS_ACT + int i; + + preempt_disable(); + + for (i = 0; i < exts->nr_actions; i++) { + struct tc_action *a = exts->actions[i]; + + tcf_action_stats_update(a, bytes, packets, lastuse); + } + + preempt_enable(); +#endif +} + /** * tcf_exts_exec - execute tc filter extensions * @skb: socket buffer -- cgit v1.2.3 From 25cdda95fda78d22d44157da15aa7ea34be3c804 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 24 May 2017 21:47:09 -0700 Subject: iscsi-target: Fix initial login PDU asynchronous socket close OOPs This patch fixes an OOPs originally introduced by: commit bb048357dad6d604520c91586334c9c230366a14 Author: Nicholas Bellinger Date: Thu Sep 5 14:54:04 2013 -0700 iscsi-target: Add sk->sk_state_change to cleanup after TCP failure which would trigger a NULL pointer dereference when a TCP connection was closed asynchronously via iscsi_target_sk_state_change(), but only when the initial PDU processing in iscsi_target_do_login() from iscsi_np process context was blocked waiting for backend I/O to complete. To address this issue, this patch makes the following changes. First, it introduces some common helper functions used for checking socket closing state, checking login_flags, and atomically checking socket closing state + setting login_flags. Second, it introduces a LOGIN_FLAGS_INITIAL_PDU bit to know when a TCP connection has dropped via iscsi_target_sk_state_change(), but the initial PDU processing within iscsi_target_do_login() in iscsi_np context is still running. For this case, it sets LOGIN_FLAGS_CLOSED, but doesn't invoke schedule_delayed_work(). The original NULL pointer dereference case reported by MNC is now handled by iscsi_target_do_login() doing an iscsi_target_sk_check_close() before transitioning to FFP to determine when the socket has already closed, or iscsi_target_start_negotiation() if the login needs to exchange more PDUs (eg: iscsi_target_do_login returned 0) but the socket has closed. For both of these cases, the cleanup of remaining connection resources will occur in iscsi_target_start_negotiation() from iscsi_np process context once the failure is detected. Finally, to handle the case where iscsi_target_sk_state_change() is called after the initial PDU processing is complete, it now invokes conn->login_work -> iscsi_target_do_login_rx() to perform cleanup once existing iscsi_target_sk_check_close() checks detect connection failure. For this case, the cleanup of remaining connection resources will occur in iscsi_target_do_login_rx() from delayed workqueue process context once the failure is detected.
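A minimal sketch distilling the check-and-clear pattern the new helpers implement, assuming the LOGIN_FLAGS_* bits from iscsi_target_core.h; the function name is illustrative. Taking sk_callback_lock for writing makes the close test and the flag update atomic with respect to a concurrent ->sk_state_change() callback:

#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/bitops.h>

/* Illustrative only: returns true if the connection has already closed;
 * otherwise clears @flag under sk_callback_lock so that an asynchronous
 * ->sk_state_change() cannot race with the flag update.
 */
static bool example_sk_check_and_clear(struct sock *sk,
                                       unsigned long *login_flags,
                                       unsigned int flag)
{
        bool closed;

        write_lock_bh(&sk->sk_callback_lock);
        closed = sk->sk_state == TCP_CLOSE_WAIT ||
                 sk->sk_state == TCP_CLOSE ||
                 test_bit(LOGIN_FLAGS_CLOSED, login_flags);
        if (!closed)
                clear_bit(flag, login_flags);
        write_unlock_bh(&sk->sk_callback_lock);

        return closed;
}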
Reported-by: Mike Christie Reviewed-by: Mike Christie Tested-by: Mike Christie Cc: Mike Christie Reported-by: Hannes Reinecke Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: Varun Prakash Cc: # v3.12+ Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_nego.c | 194 +++++++++++++++++++++---------- include/target/iscsi/iscsi_target_core.h | 1 + 2 files changed, 133 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7ccc9c1cbfd1..6f88b31242b0 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); -static bool iscsi_target_sk_state_check(struct sock *sk) +static bool __iscsi_target_sk_check_close(struct sock *sk) { if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," "returning FALSE\n"); - return false; + return true; } - return true; + return false; +} + +static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = test_bit(flag, &conn->login_flags); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + if (!state) + clear_bit(flag, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + return state; } static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) @@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", conn, current->comm, current->pid); + /* + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() + * before initial PDU processing in iscsi_target_start_negotiation() + * has completed, go ahead and retry until it's cleared. + * + * Otherwise if the TCP connection drops while this is occurring, + * iscsi_target_start_negotiation() will detect the failure, call + * cancel_delayed_work_sync(&conn->login_work), and clean up the + * remaining iscsi connection resources from iscsi_np process context.
+ */ + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); + return; + } spin_lock(&tpg->tpg_state_lock); state = (tpg->tpg_state == TPG_STATE_ACTIVE); @@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) if (!state) { pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; + goto err; } - if (conn->sock) { - struct sock *sk = conn->sock->sk; - - read_lock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - read_unlock_bh(&sk->sk_callback_lock); - - if (!state) { - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; - } + if (iscsi_target_sk_check_close(conn)) { + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); + goto err; } conn->login_kworker = current; @@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) flush_signals(current); conn->login_kworker = NULL; - if (rc < 0) { - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; - } + if (rc < 0) + goto err; pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", conn, current->comm, current->pid); rc = iscsi_target_do_login(conn, login); if (rc < 0) { - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); + goto err; } else if (!rc) { - if (conn->sock) { - struct sock *sk = conn->sock->sk; - - write_lock_bh(&sk->sk_callback_lock); - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) + goto err; } else if (rc == 1) { iscsi_target_nego_release(conn); iscsi_post_login_handler(np, conn, zero_tsih); iscsit_deaccess_np(np, tpg, tpg_np); } + return; + +err: + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); } static void iscsi_target_do_cleanup(struct work_struct *work) @@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) orig_state_change(sk); return; } + state = __iscsi_target_sk_check_close(sk); + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" " conn: %p\n", conn); + if (state) + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); write_unlock_bh(&sk->sk_callback_lock); orig_state_change(sk); return; } - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", conn); write_unlock_bh(&sk->sk_callback_lock); orig_state_change(sk); return; } + /* + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, + * but only queue conn->login_work -> iscsi_target_do_login_rx() + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. + * + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() + * will detect the dropped TCP connection from delayed workqueue context. 
+ * + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() + * via iscsi_target_sk_check_and_clear() is responsible for detecting the + * dropped TCP connection in iscsi_np process context, and cleaning up + * the remaining iscsi connection resources. + */ + if (state) { + pr_debug("iscsi_target_sk_state_change got failed state\n"); + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - write_unlock_bh(&sk->sk_callback_lock); - - pr_debug("iscsi_target_sk_state_change: state: %d\n", state); + orig_state_change(sk); - if (!state) { - pr_debug("iscsi_target_sk_state_change got failed state\n"); - schedule_delayed_work(&conn->login_cleanup_work, 0); + if (!state) + schedule_delayed_work(&conn->login_work, 0); return; } + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); } @@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo if (iscsi_target_handle_csg_one(conn, login) < 0) return -1; if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { + /* + * Check to make sure the TCP connection has not + * dropped asynchronously while session reinstatement + * was occurring in this kthread context, before + * transitioning to full feature phase operation. + */ + if (iscsi_target_sk_check_close(conn)) + return -1; + login->tsih = conn->sess->tsih; login->login_complete = 1; iscsi_target_restore_sock_callbacks(conn); @@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo break; } - if (conn->sock) { - struct sock *sk = conn->sock->sk; - bool state; - - read_lock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - read_unlock_bh(&sk->sk_callback_lock); - - if (!state) { - pr_debug("iscsi_target_do_login() failed state for" - " conn: %p\n", conn); - return -1; - } - } - return 0; } @@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation( write_lock_bh(&sk->sk_callback_lock); set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); write_unlock_bh(&sk->sk_callback_lock); } - + /* + * If iscsi_target_do_login returns zero to signal more PDU + * exchanges are required to complete the login, go ahead and + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection + * is still active. + * + * Otherwise if the TCP connection dropped asynchronously, go ahead + * and perform connection cleanup now.
+ */ ret = iscsi_target_do_login(conn, login); + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) + ret = -1; + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 275581d483dd..5f17fb770477 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -557,6 +557,7 @@ struct iscsi_conn { #define LOGIN_FLAGS_READ_ACTIVE 1 #define LOGIN_FLAGS_CLOSED 2 #define LOGIN_FLAGS_READY 4 +#define LOGIN_FLAGS_INITIAL_PDU 8 unsigned long login_flags; struct delayed_work login_work; struct delayed_work login_cleanup_work; -- cgit v1.2.3 From 71189fa9b092ef125ee741eccb2f5fa916798afd Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 30 May 2017 13:31:27 -0700 Subject: bpf: free up BPF_JMP | BPF_CALL | BPF_X opcode free up BPF_JMP | BPF_CALL | BPF_X opcode to be used by actual indirect call by register and use kernel internal opcode to mark call instruction into bpf_tail_call() helper. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 2 +- arch/powerpc/net/bpf_jit_comp64.c | 2 +- arch/s390/net/bpf_jit_comp.c | 2 +- arch/sparc/net/bpf_jit_comp_64.c | 2 +- arch/x86/net/bpf_jit_comp.c | 2 +- include/linux/filter.h | 3 +++ kernel/bpf/core.c | 2 +- kernel/bpf/verifier.c | 2 +- 8 files changed, 10 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 71f930501ade..b1d38eeb24f6 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -586,7 +586,7 @@ emit_cond_jmp: break; } /* tail call */ - case BPF_JMP | BPF_CALL | BPF_X: + case BPF_JMP | BPF_TAIL_CALL: if (emit_bpf_tail_call(ctx)) return -EFAULT; break; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index aee2bb817ac6..a01366584a4b 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -938,7 +938,7 @@ common_load: /* * Tail call */ - case BPF_JMP | BPF_CALL | BPF_X: + case BPF_JMP | BPF_TAIL_CALL: ctx->seen |= SEEN_TAILCALL; bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); break; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6e97a2e3fd8d..42ad3832586c 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -991,7 +991,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i } break; } - case BPF_JMP | BPF_CALL | BPF_X: + case BPF_JMP | BPF_TAIL_CALL: /* * Implicit input: * B1: pointer to ctx diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 21de77419f48..4a52d34facf9 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1217,7 +1217,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) } /* tail call */ - case BPF_JMP | BPF_CALL |BPF_X: + case BPF_JMP | BPF_TAIL_CALL: emit_tail_call(ctx); break; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index f58939393eef..fec12eaa0dec 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -877,7 +877,7 @@ xadd: if (is_imm8(insn->off)) } break; - case BPF_JMP | BPF_CALL | BPF_X: + case BPF_JMP | BPF_TAIL_CALL: emit_bpf_tail_call(&prog); break; diff --git a/include/linux/filter.h b/include/linux/filter.h 
index 62d948f80730..a20ba40fcb73 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -57,6 +57,9 @@ struct bpf_prog_aux; #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +/* unused opcode to mark special call to bpf_tail_call() helper */ +#define BPF_TAIL_CALL 0xf0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index dedf367f59bb..339289402b96 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -824,7 +824,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG, /* Call instruction */ [BPF_JMP | BPF_CALL] = &&JMP_CALL, - [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL, + [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, /* Jumps */ [BPF_JMP | BPF_JA] = &&JMP_JA, [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 339c8a1371de..28113d0e8e92 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3469,7 +3469,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) * that doesn't support bpf_tail_call yet */ insn->imm = 0; - insn->code |= BPF_X; + insn->code = BPF_JMP | BPF_TAIL_CALL; continue; } -- cgit v1.2.3 From 8726679a0fa317f8e83d0843b266453f31bff092 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 30 May 2017 13:31:29 -0700 Subject: bpf: teach verifier to track stack depth teach verifier to track bpf program stack depth Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/bpf.h | 1 + kernel/bpf/verifier.c | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6bb38d76faf4..fcc80ca11045 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -171,6 +171,7 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + u32 stack_depth; struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 28113d0e8e92..d96f27ff9f6f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -926,6 +926,10 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, verbose("invalid stack off=%d size=%d\n", off, size); return -EACCES; } + + if (env->prog->aux->stack_depth < -off) + env->prog->aux->stack_depth = -off; + if (t == BPF_WRITE) { if (!env->allow_ptr_leaks && state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && @@ -1032,6 +1036,9 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, return -EACCES; } + if (env->prog->aux->stack_depth < -off) + env->prog->aux->stack_depth = -off; + if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; @@ -3167,7 +3174,8 @@ process_bpf_exit: insn_idx++; } - verbose("processed %d insns\n", insn_processed); + verbose("processed %d insns, stack depth %d\n", + insn_processed, env->prog->aux->stack_depth); return 0; } -- cgit v1.2.3 From 8b8010fb7876e816300ddd60fa089e9ceb209f3e Mon Sep 17 00:00:00 2001 From: Woojung Huh Date: Wed, 31 May 2017 20:19:06 +0000 Subject: dsa: add support for Microchip KSZ tail tagging Adding support for the Microchip KSZ switch family tail tagging. Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: Woojung Huh Signed-off-by: David S. 
Miller --- include/net/dsa.h | 1 + net/dsa/Kconfig | 3 ++ net/dsa/Makefile | 1 + net/dsa/dsa.c | 3 ++ net/dsa/dsa_priv.h | 3 ++ net/dsa/tag_ksz.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 112 insertions(+) create mode 100644 net/dsa/tag_ksz.c (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index d9bd6939229a..7de1234ba136 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -31,6 +31,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_BRCM, DSA_TAG_PROTO_DSA, DSA_TAG_PROTO_EDSA, + DSA_TAG_PROTO_KSZ, DSA_TAG_PROTO_LAN9303, DSA_TAG_PROTO_MTK, DSA_TAG_PROTO_QCA, diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 297389b2ab35..cc5f8f971689 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -25,6 +25,9 @@ config NET_DSA_TAG_DSA config NET_DSA_TAG_EDSA bool +config NET_DSA_TAG_KSZ + bool + config NET_DSA_TAG_LAN9303 bool diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 90e5aa6f7d0f..fcce25da937c 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -6,6 +6,7 @@ dsa_core-y += dsa.o dsa2.o legacy.o port.o slave.o switch.o dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o +dsa_core-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 3288a80d4d6c..402459e73f33 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -49,6 +49,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { #ifdef CONFIG_NET_DSA_TAG_EDSA [DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops, #endif +#ifdef CONFIG_NET_DSA_TAG_KSZ + [DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops, +#endif #ifdef CONFIG_NET_DSA_TAG_LAN9303 [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops, #endif diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index c1d4180651af..7459d5735d8b 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -167,6 +167,9 @@ extern const struct dsa_device_ops dsa_netdev_ops; /* tag_edsa.c */ extern const struct dsa_device_ops edsa_netdev_ops; +/* tag_ksz.c */ +extern const struct dsa_device_ops ksz_netdev_ops; + /* tag_lan9303.c */ extern const struct dsa_device_ops lan9303_netdev_ops; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c new file mode 100644 index 000000000000..0b08a40b493a --- /dev/null +++ b/net/dsa/tag_ksz.c @@ -0,0 +1,101 @@ +/* + * net/dsa/tag_ksz.c - Microchip KSZ Switch tag format handling + * Copyright (c) 2017 Microchip Technology + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include "dsa_priv.h" + +/* For Ingress (Host -> KSZ), 2 bytes are added before FCS. + * --------------------------------------------------------------------------- + * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes) + * --------------------------------------------------------------------------- + * tag0 : Prioritization (not used now) + * tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5) + * + * For Egress (KSZ -> Host), 1 byte is added before FCS. 
+ * --------------------------------------------------------------------------- + * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes) + * --------------------------------------------------------------------------- + * tag0 : zero-based value represents port + * (eg, 0x00=port1, 0x02=port3, 0x06=port7) + */ + +#define KSZ_INGRESS_TAG_LEN 2 +#define KSZ_EGRESS_TAG_LEN 1 + +static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct sk_buff *nskb; + int padlen; + u8 *tag; + + padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len; + + if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) { + nskb = skb; + } else { + nskb = alloc_skb(NET_IP_ALIGN + skb->len + + padlen + KSZ_INGRESS_TAG_LEN, GFP_ATOMIC); + if (!nskb) { + kfree_skb(skb); + return NULL; + } + skb_reserve(nskb, NET_IP_ALIGN); + + skb_reset_mac_header(nskb); + skb_set_network_header(nskb, + skb_network_header(skb) - skb->head); + skb_set_transport_header(nskb, + skb_transport_header(skb) - skb->head); + skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); + kfree_skb(skb); + } + + /* skb is freed when it fails */ + if (skb_put_padto(nskb, nskb->len + padlen)) + return NULL; + + tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN); + tag[0] = 0; + tag[1] = 1 << p->dp->index; /* destination port */ + + return nskb; +} + +struct sk_buff *ksz_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds; + u8 *tag; + int source_port; + + ds = dst->cpu_dp->ds; + + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + + source_port = tag[0] & 7; + if (source_port >= ds->num_ports || !ds->ports[source_port].netdev) + return NULL; + + pskb_trim_rcsum(skb, skb->len - KSZ_EGRESS_TAG_LEN); + + skb->dev = ds->ports[source_port].netdev; + + return skb; +} + +const struct dsa_device_ops ksz_netdev_ops = { + .xmit = ksz_xmit, + .rcv = ksz_rcv, +}; -- cgit v1.2.3 From fc3973a1fa090d5f5437621a9ae1f2232a04ee5b Mon Sep 17 00:00:00 2001 From: Woojung Huh Date: Wed, 31 May 2017 20:19:13 +0000 Subject: phy: micrel: add Microchip KSZ 9477 Switch PHY support Adding Microchip 9477 Phy included in KSZ9477 Switch. Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: Woojung Huh Signed-off-by: David S. 
Miller --- drivers/net/phy/micrel.c | 11 +++++++++++ include/linux/micrel_phy.h | 2 ++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 4cfd54182da2..46e80bcc7a8a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -20,6 +20,7 @@ * ksz8081, ksz8091, * ksz8061, * Switch : ksz8873, ksz886x + * ksz9477 */ #include @@ -996,6 +997,16 @@ static struct phy_driver ksphy_driver[] = { .read_status = ksz8873mll_read_status, .suspend = genphy_suspend, .resume = genphy_resume, +}, { + .phy_id = PHY_ID_KSZ9477, + .phy_id_mask = MICREL_PHY_ID_MASK, + .name = "Microchip KSZ9477", + .features = PHY_GBIT_FEATURES, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, } }; module_phy_driver(ksphy_driver); diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index f541da68d1e7..472fa4d4ea62 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,6 +37,8 @@ #define PHY_ID_KSZ8795 0x00221550 +#define PHY_ID_KSZ9477 0x00221631 + /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 -- cgit v1.2.3 From b987e98e50ab90e5291581204ef7a1c649313a70 Mon Sep 17 00:00:00 2001 From: Woojung Huh Date: Wed, 31 May 2017 20:19:19 +0000 Subject: dsa: add DSA switch driver for Microchip KSZ9477 The KSZ9477 is a fully integrated layer 2, managed, 7-port GigE switch with numerous advanced features. 5 ports incorporate 10/100/1000 Mbps PHYs. The other 2 ports have interfaces that can be configured as SGMII, RGMII, MII or RMII. Either of these may connect directly to a host processor or to an external PHY. The SGMII port may interface to a fiber optic transceiver. This driver currently supports the vlan, fdb, mdb & mirror DSA switch operations. Reviewed-by: Florian Fainelli Signed-off-by: Woojung Huh Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/Kconfig | 2 + drivers/net/dsa/Makefile | 1 + drivers/net/dsa/microchip/Kconfig | 12 + drivers/net/dsa/microchip/Makefile | 2 + drivers/net/dsa/microchip/ksz_9477_reg.h | 1676 +++++++++++++++++++++ drivers/net/dsa/microchip/ksz_common.c | 1279 ++++++++++++++++++++ drivers/net/dsa/microchip/ksz_priv.h | 210 ++++ drivers/net/dsa/microchip/ksz_spi.c | 216 ++++ include/linux/platform_data/microchip-ksz.h | 29 + 9 files changed, 3427 insertions(+) create mode 100644 drivers/net/dsa/microchip/Kconfig create mode 100644 drivers/net/dsa/microchip/Makefile create mode 100644 drivers/net/dsa/microchip/ksz_9477_reg.h create mode 100644 drivers/net/dsa/microchip/ksz_common.c create mode 100644 drivers/net/dsa/microchip/ksz_priv.h create mode 100644 drivers/net/dsa/microchip/ksz_spi.c create mode 100644 include/linux/platform_data/microchip-ksz.h (limited to 'include') diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 68131a45ac5e..83a9bc892a3b 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -39,6 +39,8 @@ config NET_DSA_MV88E6060 This enables support for the Marvell 88E6060 ethernet switch chip.
+source "drivers/net/dsa/microchip/Kconfig" + source "drivers/net/dsa/mv88e6xxx/Kconfig" config NET_DSA_QCA8K diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 9613f36083a6..4a5b5bd297ee 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -8,4 +8,5 @@ obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o obj-y += b53/ +obj-y += microchip/ obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig new file mode 100644 index 000000000000..a8b8f59099ce --- /dev/null +++ b/drivers/net/dsa/microchip/Kconfig @@ -0,0 +1,12 @@ +menuconfig MICROCHIP_KSZ + tristate "Microchip KSZ series switch support" + depends on NET_DSA + select NET_DSA_TAG_KSZ + help + This driver adds support for Microchip KSZ switch chips. + +config MICROCHIP_KSZ_SPI_DRIVER + tristate "KSZ series SPI connected switch driver" + depends on MICROCHIP_KSZ && SPI + help + Select to enable support for registering switches configured through SPI. diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile new file mode 100644 index 000000000000..ed335e29fae8 --- /dev/null +++ b/drivers/net/dsa/microchip/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MICROCHIP_KSZ) += ksz_common.o +obj-$(CONFIG_MICROCHIP_KSZ_SPI_DRIVER) += ksz_spi.o diff --git a/drivers/net/dsa/microchip/ksz_9477_reg.h b/drivers/net/dsa/microchip/ksz_9477_reg.h new file mode 100644 index 000000000000..6aa6752035a1 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_9477_reg.h @@ -0,0 +1,1676 @@ +/* + * Microchip KSZ9477 register definitions + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __KSZ9477_REGS_H +#define __KSZ9477_REGS_H + +#define KS_PRIO_M 0x7 +#define KS_PRIO_S 4 + +/* 0 - Operation */ +#define REG_CHIP_ID0__1 0x0000 + +#define REG_CHIP_ID1__1 0x0001 + +#define FAMILY_ID 0x95 +#define FAMILY_ID_94 0x94 +#define FAMILY_ID_95 0x95 +#define FAMILY_ID_85 0x85 +#define FAMILY_ID_98 0x98 +#define FAMILY_ID_88 0x88 + +#define REG_CHIP_ID2__1 0x0002 + +#define CHIP_ID_63 0x63 +#define CHIP_ID_66 0x66 +#define CHIP_ID_67 0x67 +#define CHIP_ID_77 0x77 +#define CHIP_ID_93 0x93 +#define CHIP_ID_96 0x96 +#define CHIP_ID_97 0x97 + +#define REG_CHIP_ID3__1 0x0003 + +#define SWITCH_REVISION_M 0x0F +#define SWITCH_REVISION_S 4 +#define SWITCH_RESET 0x01 + +#define REG_SW_PME_CTRL 0x0006 + +#define PME_ENABLE BIT(1) +#define PME_POLARITY BIT(0) + +#define REG_GLOBAL_OPTIONS 0x000F + +#define SW_GIGABIT_ABLE BIT(6) +#define SW_REDUNDANCY_ABLE BIT(5) +#define SW_AVB_ABLE BIT(4) +#define SW_9567_RL_5_2 0xC +#define SW_9477_SL_5_2 0xD + +#define SW_9896_GL_5_1 0xB +#define SW_9896_RL_5_1 0x8 +#define SW_9896_SL_5_1 0x9 + +#define SW_9895_GL_4_1 0x7 +#define SW_9895_RL_4_1 0x4 +#define SW_9895_SL_4_1 0x5 + +#define SW_9896_RL_4_2 0x6 + +#define SW_9893_RL_2_1 0x0 +#define SW_9893_SL_2_1 0x1 +#define SW_9893_GL_2_1 0x3 + +#define SW_QW_ABLE BIT(5) +#define SW_9893_RN_2_1 0xC + +#define REG_SW_INT_STATUS__4 0x0010 +#define REG_SW_INT_MASK__4 0x0014 + +#define LUE_INT BIT(31) +#define TRIG_TS_INT BIT(30) +#define APB_TIMEOUT_INT BIT(29) + +#define SWITCH_INT_MASK (TRIG_TS_INT | APB_TIMEOUT_INT) + +#define REG_SW_PORT_INT_STATUS__4 0x0018 +#define REG_SW_PORT_INT_MASK__4 0x001C +#define REG_SW_PHY_INT_STATUS 0x0020 +#define REG_SW_PHY_INT_ENABLE 0x0024 + +/* 1 - Global */ +#define REG_SW_GLOBAL_SERIAL_CTRL_0 0x0100 +#define SW_SPARE_REG_2 BIT(7) +#define SW_SPARE_REG_1 BIT(6) +#define SW_SPARE_REG_0 BIT(5) +#define SW_BIG_ENDIAN BIT(4) +#define SPI_AUTO_EDGE_DETECTION BIT(1) +#define SPI_CLOCK_OUT_RISING_EDGE BIT(0) + +#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103 +#define SW_ENABLE_REFCLKO BIT(1) +#define SW_REFCLKO_IS_125MHZ BIT(0) + +#define REG_SW_IBA__4 0x0104 + +#define SW_IBA_ENABLE BIT(31) +#define SW_IBA_DA_MATCH BIT(30) +#define SW_IBA_INIT BIT(29) +#define SW_IBA_QID_M 0xF +#define SW_IBA_QID_S 22 +#define SW_IBA_PORT_M 0x2F +#define SW_IBA_PORT_S 16 +#define SW_IBA_FRAME_TPID_M 0xFFFF + +#define REG_SW_APB_TIMEOUT_ADDR__4 0x0108 + +#define APB_TIMEOUT_ACKNOWLEDGE BIT(31) + +#define REG_SW_IBA_SYNC__1 0x010C + +#define REG_SW_IO_STRENGTH__1 0x010D +#define SW_DRIVE_STRENGTH_M 0x7 +#define SW_DRIVE_STRENGTH_2MA 0 +#define SW_DRIVE_STRENGTH_4MA 1 +#define SW_DRIVE_STRENGTH_8MA 2 +#define SW_DRIVE_STRENGTH_12MA 3 +#define SW_DRIVE_STRENGTH_16MA 4 +#define SW_DRIVE_STRENGTH_20MA 5 +#define SW_DRIVE_STRENGTH_24MA 6 +#define SW_DRIVE_STRENGTH_28MA 7 +#define SW_HI_SPEED_DRIVE_STRENGTH_S 4 +#define SW_LO_SPEED_DRIVE_STRENGTH_S 0 + +#define REG_SW_IBA_STATUS__4 0x0110 + +#define SW_IBA_REQ BIT(31) +#define SW_IBA_RESP BIT(30) +#define SW_IBA_DA_MISMATCH BIT(14) +#define SW_IBA_FMT_MISMATCH BIT(13) +#define SW_IBA_CODE_ERROR BIT(12) +#define SW_IBA_CMD_ERROR BIT(11) +#define SW_IBA_CMD_LOC_M (BIT(6) - 1) + +#define REG_SW_IBA_STATES__4 0x0114 + +#define SW_IBA_BUF_STATE_S 30 +#define SW_IBA_CMD_STATE_S 28 +#define SW_IBA_RESP_STATE_S 26 +#define SW_IBA_STATE_M 0x3 +#define SW_IBA_PACKET_SIZE_M 0x7F +#define SW_IBA_PACKET_SIZE_S 16 +#define SW_IBA_FMT_ID_M 0xFFFF + +#define REG_SW_IBA_RESULT__4 0x0118 + +#define SW_IBA_SIZE_S 24 + +#define SW_IBA_RETRY_CNT_M (BIT(5) - 1) + 
+/* 2 - PHY */ +#define REG_SW_POWER_MANAGEMENT_CTRL 0x0201 + +#define SW_PLL_POWER_DOWN BIT(5) +#define SW_POWER_DOWN_MODE 0x3 +#define SW_ENERGY_DETECTION 1 +#define SW_SOFT_POWER_DOWN 2 +#define SW_POWER_SAVING 3 + +/* 3 - Operation Control */ +#define REG_SW_OPERATION 0x0300 + +#define SW_DOUBLE_TAG BIT(7) +#define SW_RESET BIT(1) +#define SW_START BIT(0) + +#define REG_SW_MAC_ADDR_0 0x0302 +#define REG_SW_MAC_ADDR_1 0x0303 +#define REG_SW_MAC_ADDR_2 0x0304 +#define REG_SW_MAC_ADDR_3 0x0305 +#define REG_SW_MAC_ADDR_4 0x0306 +#define REG_SW_MAC_ADDR_5 0x0307 + +#define REG_SW_MTU__2 0x0308 + +#define REG_SW_ISP_TPID__2 0x030A + +#define REG_SW_HSR_TPID__2 0x030C + +#define REG_AVB_STRATEGY__2 0x030E + +#define SW_SHAPING_CREDIT_ACCT BIT(1) +#define SW_POLICING_CREDIT_ACCT BIT(0) + +#define REG_SW_LUE_CTRL_0 0x0310 + +#define SW_VLAN_ENABLE BIT(7) +#define SW_DROP_INVALID_VID BIT(6) +#define SW_AGE_CNT_M 0x7 +#define SW_AGE_CNT_S 3 +#define SW_RESV_MCAST_ENABLE BIT(2) +#define SW_HASH_OPTION_M 0x03 +#define SW_HASH_OPTION_CRC 1 +#define SW_HASH_OPTION_XOR 2 +#define SW_HASH_OPTION_DIRECT 3 + +#define REG_SW_LUE_CTRL_1 0x0311 + +#define UNICAST_LEARN_DISABLE BIT(7) +#define SW_SRC_ADDR_FILTER BIT(6) +#define SW_FLUSH_STP_TABLE BIT(5) +#define SW_FLUSH_MSTP_TABLE BIT(4) +#define SW_FWD_MCAST_SRC_ADDR BIT(3) +#define SW_AGING_ENABLE BIT(2) +#define SW_FAST_AGING BIT(1) +#define SW_LINK_AUTO_AGING BIT(0) + +#define REG_SW_LUE_CTRL_2 0x0312 + +#define SW_TRAP_DOUBLE_TAG BIT(6) +#define SW_EGRESS_VLAN_FILTER_DYN BIT(5) +#define SW_EGRESS_VLAN_FILTER_STA BIT(4) +#define SW_FLUSH_OPTION_M 0x3 +#define SW_FLUSH_OPTION_S 2 +#define SW_FLUSH_OPTION_DYN_MAC 1 +#define SW_FLUSH_OPTION_STA_MAC 2 +#define SW_FLUSH_OPTION_BOTH 3 +#define SW_PRIO_M 0x3 +#define SW_PRIO_DA 0 +#define SW_PRIO_SA 1 +#define SW_PRIO_HIGHEST_DA_SA 2 +#define SW_PRIO_LOWEST_DA_SA 3 + +#define REG_SW_LUE_CTRL_3 0x0313 + +#define REG_SW_LUE_INT_STATUS 0x0314 +#define REG_SW_LUE_INT_ENABLE 0x0315 + +#define LEARN_FAIL_INT BIT(2) +#define ALMOST_FULL_INT BIT(1) +#define WRITE_FAIL_INT BIT(0) + +#define REG_SW_LUE_INDEX_0__2 0x0316 + +#define ENTRY_INDEX_M 0x0FFF + +#define REG_SW_LUE_INDEX_1__2 0x0318 + +#define FAIL_INDEX_M 0x03FF + +#define REG_SW_LUE_INDEX_2__2 0x031A + +#define REG_SW_LUE_UNK_UCAST_CTRL__4 0x0320 + +#define SW_UNK_UCAST_ENABLE BIT(31) + +#define REG_SW_LUE_UNK_MCAST_CTRL__4 0x0324 + +#define SW_UNK_MCAST_ENABLE BIT(31) + +#define REG_SW_LUE_UNK_VID_CTRL__4 0x0328 + +#define SW_UNK_VID_ENABLE BIT(31) + +#define REG_SW_MAC_CTRL_0 0x0330 + +#define SW_NEW_BACKOFF BIT(7) +#define SW_CHECK_LENGTH BIT(3) +#define SW_PAUSE_UNH_MODE BIT(1) +#define SW_AGGR_BACKOFF BIT(0) + +#define REG_SW_MAC_CTRL_1 0x0331 + +#define MULTICAST_STORM_DISABLE BIT(6) +#define SW_BACK_PRESSURE BIT(5) +#define FAIR_FLOW_CTRL BIT(4) +#define NO_EXC_COLLISION_DROP BIT(3) +#define SW_JUMBO_PACKET BIT(2) +#define SW_LEGAL_PACKET_DISABLE BIT(1) +#define SW_PASS_SHORT_FRAME BIT(0) + +#define REG_SW_MAC_CTRL_2 0x0332 + +#define SW_REPLACE_VID BIT(3) +#define BROADCAST_STORM_RATE_HI 0x07 + +#define REG_SW_MAC_CTRL_3 0x0333 + +#define BROADCAST_STORM_RATE_LO 0xFF +#define BROADCAST_STORM_RATE 0x07FF + +#define REG_SW_MAC_CTRL_4 0x0334 + +#define SW_PASS_PAUSE BIT(3) + +#define REG_SW_MAC_CTRL_5 0x0335 + +#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3) + +#define REG_SW_MAC_CTRL_6 0x0336 + +#define SW_MIB_COUNTER_FLUSH BIT(7) +#define SW_MIB_COUNTER_FREEZE BIT(6) + +#define REG_SW_MAC_802_1P_MAP_0 0x0338 +#define REG_SW_MAC_802_1P_MAP_1 0x0339 +#define 
REG_SW_MAC_802_1P_MAP_2 0x033A +#define REG_SW_MAC_802_1P_MAP_3 0x033B + +#define SW_802_1P_MAP_M KS_PRIO_M +#define SW_802_1P_MAP_S KS_PRIO_S + +#define REG_SW_MAC_ISP_CTRL 0x033C + +#define REG_SW_MAC_TOS_CTRL 0x033E + +#define SW_TOS_DSCP_REMARK BIT(1) +#define SW_TOS_DSCP_REMAP BIT(0) + +#define REG_SW_MAC_TOS_PRIO_0 0x0340 +#define REG_SW_MAC_TOS_PRIO_1 0x0341 +#define REG_SW_MAC_TOS_PRIO_2 0x0342 +#define REG_SW_MAC_TOS_PRIO_3 0x0343 +#define REG_SW_MAC_TOS_PRIO_4 0x0344 +#define REG_SW_MAC_TOS_PRIO_5 0x0345 +#define REG_SW_MAC_TOS_PRIO_6 0x0346 +#define REG_SW_MAC_TOS_PRIO_7 0x0347 +#define REG_SW_MAC_TOS_PRIO_8 0x0348 +#define REG_SW_MAC_TOS_PRIO_9 0x0349 +#define REG_SW_MAC_TOS_PRIO_10 0x034A +#define REG_SW_MAC_TOS_PRIO_11 0x034B +#define REG_SW_MAC_TOS_PRIO_12 0x034C +#define REG_SW_MAC_TOS_PRIO_13 0x034D +#define REG_SW_MAC_TOS_PRIO_14 0x034E +#define REG_SW_MAC_TOS_PRIO_15 0x034F +#define REG_SW_MAC_TOS_PRIO_16 0x0350 +#define REG_SW_MAC_TOS_PRIO_17 0x0351 +#define REG_SW_MAC_TOS_PRIO_18 0x0352 +#define REG_SW_MAC_TOS_PRIO_19 0x0353 +#define REG_SW_MAC_TOS_PRIO_20 0x0354 +#define REG_SW_MAC_TOS_PRIO_21 0x0355 +#define REG_SW_MAC_TOS_PRIO_22 0x0356 +#define REG_SW_MAC_TOS_PRIO_23 0x0357 +#define REG_SW_MAC_TOS_PRIO_24 0x0358 +#define REG_SW_MAC_TOS_PRIO_25 0x0359 +#define REG_SW_MAC_TOS_PRIO_26 0x035A +#define REG_SW_MAC_TOS_PRIO_27 0x035B +#define REG_SW_MAC_TOS_PRIO_28 0x035C +#define REG_SW_MAC_TOS_PRIO_29 0x035D +#define REG_SW_MAC_TOS_PRIO_30 0x035E +#define REG_SW_MAC_TOS_PRIO_31 0x035F + +#define REG_SW_MRI_CTRL_0 0x0370 + +#define SW_IGMP_SNOOP BIT(6) +#define SW_IPV6_MLD_OPTION BIT(3) +#define SW_IPV6_MLD_SNOOP BIT(2) +#define SW_MIRROR_RX_TX BIT(0) + +#define REG_SW_CLASS_D_IP_CTRL__4 0x0374 + +#define SW_CLASS_D_IP_ENABLE BIT(31) + +#define REG_SW_MRI_CTRL_8 0x0378 + +#define SW_NO_COLOR_S 6 +#define SW_RED_COLOR_S 4 +#define SW_YELLOW_COLOR_S 2 +#define SW_GREEN_COLOR_S 0 +#define SW_COLOR_M 0x3 + +#define REG_SW_QM_CTRL__4 0x0390 + +#define PRIO_SCHEME_SELECT_M KS_PRIO_M +#define PRIO_SCHEME_SELECT_S 6 +#define PRIO_MAP_3_HI 0 +#define PRIO_MAP_2_HI 2 +#define PRIO_MAP_0_LO 3 +#define UNICAST_VLAN_BOUNDARY BIT(1) + +#define REG_SW_EEE_QM_CTRL__2 0x03C0 + +#define REG_SW_EEE_TXQ_WAIT_TIME__2 0x03C2 + +/* 4 - */ +#define REG_SW_VLAN_ENTRY__4 0x0400 + +#define VLAN_VALID BIT(31) +#define VLAN_FORWARD_OPTION BIT(27) +#define VLAN_PRIO_M KS_PRIO_M +#define VLAN_PRIO_S 24 +#define VLAN_MSTP_M 0x7 +#define VLAN_MSTP_S 12 +#define VLAN_FID_M 0x7F + +#define REG_SW_VLAN_ENTRY_UNTAG__4 0x0404 +#define REG_SW_VLAN_ENTRY_PORTS__4 0x0408 + +#define REG_SW_VLAN_ENTRY_INDEX__2 0x040C + +#define VLAN_INDEX_M 0x0FFF + +#define REG_SW_VLAN_CTRL 0x040E + +#define VLAN_START BIT(7) +#define VLAN_ACTION 0x3 +#define VLAN_WRITE 1 +#define VLAN_READ 2 +#define VLAN_CLEAR 3 + +#define REG_SW_ALU_INDEX_0 0x0410 + +#define ALU_FID_INDEX_S 16 +#define ALU_MAC_ADDR_HI 0xFFFF + +#define REG_SW_ALU_INDEX_1 0x0414 + +#define ALU_DIRECT_INDEX_M (BIT(12) - 1) + +#define REG_SW_ALU_CTRL__4 0x0418 + +#define ALU_VALID_CNT_M (BIT(14) - 1) +#define ALU_VALID_CNT_S 16 +#define ALU_START BIT(7) +#define ALU_VALID BIT(6) +#define ALU_DIRECT BIT(2) +#define ALU_ACTION 0x3 +#define ALU_WRITE 1 +#define ALU_READ 2 +#define ALU_SEARCH 3 + +#define REG_SW_ALU_STAT_CTRL__4 0x041C + +#define ALU_STAT_INDEX_M (BIT(4) - 1) +#define ALU_STAT_INDEX_S 16 +#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1) +#define ALU_STAT_START BIT(7) +#define ALU_RESV_MCAST_ADDR BIT(1) +#define ALU_STAT_READ BIT(0) + +#define 
REG_SW_ALU_VAL_A 0x0420 + +#define ALU_V_STATIC_VALID BIT(31) +#define ALU_V_SRC_FILTER BIT(30) +#define ALU_V_DST_FILTER BIT(29) +#define ALU_V_PRIO_AGE_CNT_M (BIT(3) - 1) +#define ALU_V_PRIO_AGE_CNT_S 26 +#define ALU_V_MSTP_M 0x7 + +#define REG_SW_ALU_VAL_B 0x0424 + +#define ALU_V_OVERRIDE BIT(31) +#define ALU_V_USE_FID BIT(30) +#define ALU_V_PORT_MAP (BIT(24) - 1) + +#define REG_SW_ALU_VAL_C 0x0428 + +#define ALU_V_FID_M (BIT(16) - 1) +#define ALU_V_FID_S 16 +#define ALU_V_MAC_ADDR_HI 0xFFFF + +#define REG_SW_ALU_VAL_D 0x042C + +#define REG_HSR_ALU_INDEX_0 0x0440 + +#define REG_HSR_ALU_INDEX_1 0x0444 + +#define HSR_DST_MAC_INDEX_LO_S 16 +#define HSR_SRC_MAC_INDEX_HI 0xFFFF + +#define REG_HSR_ALU_INDEX_2 0x0448 + +#define HSR_INDEX_MAX BIT(9) +#define HSR_DIRECT_INDEX_M (HSR_INDEX_MAX - 1) + +#define REG_HSR_ALU_INDEX_3 0x044C + +#define HSR_PATH_INDEX_M (BIT(4) - 1) + +#define REG_HSR_ALU_CTRL__4 0x0450 + +#define HSR_VALID_CNT_M (BIT(14) - 1) +#define HSR_VALID_CNT_S 16 +#define HSR_START BIT(7) +#define HSR_VALID BIT(6) +#define HSR_SEARCH_END BIT(5) +#define HSR_DIRECT BIT(2) +#define HSR_ACTION 0x3 +#define HSR_WRITE 1 +#define HSR_READ 2 +#define HSR_SEARCH 3 + +#define REG_HSR_ALU_VAL_A 0x0454 + +#define HSR_V_STATIC_VALID BIT(31) +#define HSR_V_AGE_CNT_M (BIT(3) - 1) +#define HSR_V_AGE_CNT_S 26 +#define HSR_V_PATH_ID_M (BIT(4) - 1) + +#define REG_HSR_ALU_VAL_B 0x0458 + +#define REG_HSR_ALU_VAL_C 0x045C + +#define HSR_V_DST_MAC_ADDR_LO_S 16 +#define HSR_V_SRC_MAC_ADDR_HI 0xFFFF + +#define REG_HSR_ALU_VAL_D 0x0460 + +#define REG_HSR_ALU_VAL_E 0x0464 + +#define HSR_V_START_SEQ_1_S 16 +#define HSR_V_START_SEQ_2_S 0 + +#define REG_HSR_ALU_VAL_F 0x0468 + +#define HSR_V_EXP_SEQ_1_S 16 +#define HSR_V_EXP_SEQ_2_S 0 + +#define REG_HSR_ALU_VAL_G 0x046C + +#define HSR_V_SEQ_CNT_1_S 16 +#define HSR_V_SEQ_CNT_2_S 0 + +#define HSR_V_SEQ_M (BIT(16) - 1) + +/* 5 - PTP Clock */ +#define REG_PTP_CLK_CTRL 0x0500 + +#define PTP_STEP_ADJ BIT(6) +#define PTP_STEP_DIR BIT(5) +#define PTP_READ_TIME BIT(4) +#define PTP_LOAD_TIME BIT(3) +#define PTP_CLK_ADJ_ENABLE BIT(2) +#define PTP_CLK_ENABLE BIT(1) +#define PTP_CLK_RESET BIT(0) + +#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502 + +#define PTP_RTC_SUB_NANOSEC_M 0x0007 + +#define REG_PTP_RTC_NANOSEC 0x0504 +#define REG_PTP_RTC_NANOSEC_H 0x0504 +#define REG_PTP_RTC_NANOSEC_L 0x0506 + +#define REG_PTP_RTC_SEC 0x0508 +#define REG_PTP_RTC_SEC_H 0x0508 +#define REG_PTP_RTC_SEC_L 0x050A + +#define REG_PTP_SUBNANOSEC_RATE 0x050C +#define REG_PTP_SUBNANOSEC_RATE_H 0x050C + +#define PTP_RATE_DIR BIT(31) +#define PTP_TMP_RATE_ENABLE BIT(30) + +#define REG_PTP_SUBNANOSEC_RATE_L 0x050E + +#define REG_PTP_RATE_DURATION 0x0510 +#define REG_PTP_RATE_DURATION_H 0x0510 +#define REG_PTP_RATE_DURATION_L 0x0512 + +#define REG_PTP_MSG_CONF1 0x0514 + +#define PTP_802_1AS BIT(7) +#define PTP_ENABLE BIT(6) +#define PTP_ETH_ENABLE BIT(5) +#define PTP_IPV4_UDP_ENABLE BIT(4) +#define PTP_IPV6_UDP_ENABLE BIT(3) +#define PTP_TC_P2P BIT(2) +#define PTP_MASTER BIT(1) +#define PTP_1STEP BIT(0) + +#define REG_PTP_MSG_CONF2 0x0516 + +#define PTP_UNICAST_ENABLE BIT(12) +#define PTP_ALTERNATE_MASTER BIT(11) +#define PTP_ALL_HIGH_PRIO BIT(10) +#define PTP_SYNC_CHECK BIT(9) +#define PTP_DELAY_CHECK BIT(8) +#define PTP_PDELAY_CHECK BIT(7) +#define PTP_DROP_SYNC_DELAY_REQ BIT(5) +#define PTP_DOMAIN_CHECK BIT(4) +#define PTP_UDP_CHECKSUM BIT(2) + +#define REG_PTP_DOMAIN_VERSION 0x0518 +#define PTP_VERSION_M 0xFF00 +#define PTP_DOMAIN_M 0x00FF + +#define REG_PTP_UNIT_INDEX__4 0x0520 + +#define 
PTP_UNIT_M 0xF + +#define PTP_GPIO_INDEX_S 16 +#define PTP_TSI_INDEX_S 8 +#define PTP_TOU_INDEX_S 0 + +#define REG_PTP_TRIG_STATUS__4 0x0524 + +#define TRIG_ERROR_S 16 +#define TRIG_DONE_S 0 + +#define REG_PTP_INT_STATUS__4 0x0528 + +#define TRIG_INT_S 16 +#define TS_INT_S 0 + +#define TRIG_UNIT_M 0x7 +#define TS_UNIT_M 0x3 + +#define REG_PTP_CTRL_STAT__4 0x052C + +#define GPIO_IN BIT(7) +#define GPIO_OUT BIT(6) +#define TS_INT_ENABLE BIT(5) +#define TRIG_ACTIVE BIT(4) +#define TRIG_ENABLE BIT(3) +#define TRIG_RESET BIT(2) +#define TS_ENABLE BIT(1) +#define TS_RESET BIT(0) + +#define GPIO_CTRL_M (GPIO_IN | GPIO_OUT) + +#define TRIG_CTRL_M \ + (TRIG_ACTIVE | TRIG_ENABLE | TRIG_RESET) + +#define TS_CTRL_M \ + (TS_INT_ENABLE | TS_ENABLE | TS_RESET) + +#define REG_TRIG_TARGET_NANOSEC 0x0530 +#define REG_TRIG_TARGET_SEC 0x0534 + +#define REG_TRIG_CTRL__4 0x0538 + +#define TRIG_CASCADE_ENABLE BIT(31) +#define TRIG_CASCADE_TAIL BIT(30) +#define TRIG_CASCADE_UPS_M 0xF +#define TRIG_CASCADE_UPS_S 26 +#define TRIG_NOW BIT(25) +#define TRIG_NOTIFY BIT(24) +#define TRIG_EDGE BIT(23) +#define TRIG_PATTERN_S 20 +#define TRIG_PATTERN_M 0x7 +#define TRIG_NEG_EDGE 0 +#define TRIG_POS_EDGE 1 +#define TRIG_NEG_PULSE 2 +#define TRIG_POS_PULSE 3 +#define TRIG_NEG_PERIOD 4 +#define TRIG_POS_PERIOD 5 +#define TRIG_REG_OUTPUT 6 +#define TRIG_GPO_S 16 +#define TRIG_GPO_M 0xF +#define TRIG_CASCADE_ITERATE_CNT_M 0xFFFF + +#define REG_TRIG_CYCLE_WIDTH 0x053C + +#define REG_TRIG_CYCLE_CNT 0x0540 + +#define TRIG_CYCLE_CNT_M 0xFFFF +#define TRIG_CYCLE_CNT_S 16 +#define TRIG_BIT_PATTERN_M 0xFFFF + +#define REG_TRIG_ITERATE_TIME 0x0544 + +#define REG_TRIG_PULSE_WIDTH__4 0x0548 + +#define TRIG_PULSE_WIDTH_M 0x00FFFFFF + +#define REG_TS_CTRL_STAT__4 0x0550 + +#define TS_EVENT_DETECT_M 0xF +#define TS_EVENT_DETECT_S 17 +#define TS_EVENT_OVERFLOW BIT(16) +#define TS_GPI_M 0xF +#define TS_GPI_S 8 +#define TS_DETECT_RISE BIT(7) +#define TS_DETECT_FALL BIT(6) +#define TS_DETECT_S 6 +#define TS_CASCADE_TAIL BIT(5) +#define TS_CASCADE_UPS_M 0xF +#define TS_CASCADE_UPS_S 1 +#define TS_CASCADE_ENABLE BIT(0) + +#define DETECT_RISE (TS_DETECT_RISE >> TS_DETECT_S) +#define DETECT_FALL (TS_DETECT_FALL >> TS_DETECT_S) + +#define REG_TS_EVENT_0_NANOSEC 0x0554 +#define REG_TS_EVENT_0_SEC 0x0558 +#define REG_TS_EVENT_0_SUB_NANOSEC 0x055C + +#define REG_TS_EVENT_1_NANOSEC 0x0560 +#define REG_TS_EVENT_1_SEC 0x0564 +#define REG_TS_EVENT_1_SUB_NANOSEC 0x0568 + +#define REG_TS_EVENT_2_NANOSEC 0x056C +#define REG_TS_EVENT_2_SEC 0x0570 +#define REG_TS_EVENT_2_SUB_NANOSEC 0x0574 + +#define REG_TS_EVENT_3_NANOSEC 0x0578 +#define REG_TS_EVENT_3_SEC 0x057C +#define REG_TS_EVENT_3_SUB_NANOSEC 0x0580 + +#define REG_TS_EVENT_4_NANOSEC 0x0584 +#define REG_TS_EVENT_4_SEC 0x0588 +#define REG_TS_EVENT_4_SUB_NANOSEC 0x058C + +#define REG_TS_EVENT_5_NANOSEC 0x0590 +#define REG_TS_EVENT_5_SEC 0x0594 +#define REG_TS_EVENT_5_SUB_NANOSEC 0x0598 + +#define REG_TS_EVENT_6_NANOSEC 0x059C +#define REG_TS_EVENT_6_SEC 0x05A0 +#define REG_TS_EVENT_6_SUB_NANOSEC 0x05A4 + +#define REG_TS_EVENT_7_NANOSEC 0x05A8 +#define REG_TS_EVENT_7_SEC 0x05AC +#define REG_TS_EVENT_7_SUB_NANOSEC 0x05B0 + +#define TS_EVENT_EDGE_M 0x1 +#define TS_EVENT_EDGE_S 30 +#define TS_EVENT_NANOSEC_M (BIT(30) - 1) + +#define TS_EVENT_SUB_NANOSEC_M 0x7 + +#define TS_EVENT_SAMPLE \ + (REG_TS_EVENT_1_NANOSEC - REG_TS_EVENT_0_NANOSEC) + +#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12)) + +#define REG_GLOBAL_RR_INDEX__1 0x0600 + +/* DLR */ +#define REG_DLR_SRC_PORT__4 0x0604 + +#define 
DLR_SRC_PORT_UNICAST BIT(31) +#define DLR_SRC_PORT_M 0x3 +#define DLR_SRC_PORT_BOTH 0 +#define DLR_SRC_PORT_EACH 1 + +#define REG_DLR_IP_ADDR__4 0x0608 + +#define REG_DLR_CTRL__1 0x0610 + +#define DLR_RESET_SEQ_ID BIT(3) +#define DLR_BACKUP_AUTO_ON BIT(2) +#define DLR_BEACON_TX_ENABLE BIT(1) +#define DLR_ASSIST_ENABLE BIT(0) + +#define REG_DLR_STATE__1 0x0611 + +#define DLR_NODE_STATE_M 0x3 +#define DLR_NODE_STATE_S 1 +#define DLR_NODE_STATE_IDLE 0 +#define DLR_NODE_STATE_FAULT 1 +#define DLR_NODE_STATE_NORMAL 2 +#define DLR_RING_STATE_FAULT 0 +#define DLR_RING_STATE_NORMAL 1 + +#define REG_DLR_PRECEDENCE__1 0x0612 + +#define REG_DLR_BEACON_INTERVAL__4 0x0614 + +#define REG_DLR_BEACON_TIMEOUT__4 0x0618 + +#define REG_DLR_TIMEOUT_WINDOW__4 0x061C + +#define DLR_TIMEOUT_WINDOW_M (BIT(22) - 1) + +#define REG_DLR_VLAN_ID__2 0x0620 + +#define DLR_VLAN_ID_M (BIT(12) - 1) + +#define REG_DLR_DEST_ADDR_0 0x0622 +#define REG_DLR_DEST_ADDR_1 0x0623 +#define REG_DLR_DEST_ADDR_2 0x0624 +#define REG_DLR_DEST_ADDR_3 0x0625 +#define REG_DLR_DEST_ADDR_4 0x0626 +#define REG_DLR_DEST_ADDR_5 0x0627 + +#define REG_DLR_PORT_MAP__4 0x0628 + +#define REG_DLR_CLASS__1 0x062C + +#define DLR_FRAME_QID_M 0x3 + +/* HSR */ +#define REG_HSR_PORT_MAP__4 0x0640 + +#define REG_HSR_ALU_CTRL_0__1 0x0644 + +#define HSR_DUPLICATE_DISCARD BIT(7) +#define HSR_NODE_UNICAST BIT(6) +#define HSR_AGE_CNT_DEFAULT_M 0x7 +#define HSR_AGE_CNT_DEFAULT_S 3 +#define HSR_LEARN_MCAST_DISABLE BIT(2) +#define HSR_HASH_OPTION_M 0x3 +#define HSR_HASH_DISABLE 0 +#define HSR_HASH_UPPER_BITS 1 +#define HSR_HASH_LOWER_BITS 2 +#define HSR_HASH_XOR_BOTH_BITS 3 + +#define REG_HSR_ALU_CTRL_1__1 0x0645 + +#define HSR_LEARN_UCAST_DISABLE BIT(7) +#define HSR_FLUSH_TABLE BIT(5) +#define HSR_PROC_MCAST_SRC BIT(3) +#define HSR_AGING_ENABLE BIT(2) + +#define REG_HSR_ALU_CTRL_2__2 0x0646 + +#define REG_HSR_ALU_AGE_PERIOD__4 0x0648 + +#define REG_HSR_ALU_INT_STATUS__1 0x064C +#define REG_HSR_ALU_INT_MASK__1 0x064D + +#define HSR_WINDOW_OVERFLOW_INT BIT(3) +#define HSR_LEARN_FAIL_INT BIT(2) +#define HSR_ALMOST_FULL_INT BIT(1) +#define HSR_WRITE_FAIL_INT BIT(0) + +#define REG_HSR_ALU_ENTRY_0__2 0x0650 + +#define HSR_ENTRY_INDEX_M (BIT(10) - 1) +#define HSR_FAIL_INDEX_M (BIT(8) - 1) + +#define REG_HSR_ALU_ENTRY_1__2 0x0652 + +#define HSR_FAIL_LEARN_INDEX_M (BIT(8) - 1) + +#define REG_HSR_ALU_ENTRY_3__2 0x0654 + +#define HSR_CPU_ACCESS_ENTRY_INDEX_M (BIT(8) - 1) + +/* 0 - Operation */ +#define REG_PORT_DEFAULT_VID 0x0000 + +#define REG_PORT_CUSTOM_VID 0x0002 +#define REG_PORT_AVB_SR_1_VID 0x0004 +#define REG_PORT_AVB_SR_2_VID 0x0006 + +#define REG_PORT_AVB_SR_1_TYPE 0x0008 +#define REG_PORT_AVB_SR_2_TYPE 0x000A + +#define REG_PORT_PME_STATUS 0x0013 +#define REG_PORT_PME_CTRL 0x0017 + +#define PME_WOL_MAGICPKT BIT(2) +#define PME_WOL_LINKUP BIT(1) +#define PME_WOL_ENERGY BIT(0) + +#define REG_PORT_INT_STATUS 0x001B +#define REG_PORT_INT_MASK 0x001F + +#define PORT_SGMII_INT BIT(3) +#define PORT_PTP_INT BIT(2) +#define PORT_PHY_INT BIT(1) +#define PORT_ACL_INT BIT(0) + +#define PORT_INT_MASK \ + (PORT_SGMII_INT | PORT_PTP_INT | PORT_PHY_INT | PORT_ACL_INT) + +#define REG_PORT_CTRL_0 0x0020 + +#define PORT_MAC_LOOPBACK BIT(7) +#define PORT_FORCE_TX_FLOW_CTRL BIT(4) +#define PORT_FORCE_RX_FLOW_CTRL BIT(3) +#define PORT_TAIL_TAG_ENABLE BIT(2) +#define PORT_QUEUE_SPLIT_ENABLE 0x3 + +#define REG_PORT_CTRL_1 0x0021 + +#define PORT_SRP_ENABLE 0x3 + +#define REG_PORT_STATUS_0 0x0030 + +#define PORT_INTF_SPEED_M 0x3 +#define PORT_INTF_SPEED_S 3 +#define PORT_INTF_FULL_DUPLEX 
BIT(2) +#define PORT_TX_FLOW_CTRL BIT(1) +#define PORT_RX_FLOW_CTRL BIT(0) + +#define REG_PORT_STATUS_1 0x0034 + +/* 1 - PHY */ +#define REG_PORT_PHY_CTRL 0x0100 + +#define PORT_PHY_RESET BIT(15) +#define PORT_PHY_LOOPBACK BIT(14) +#define PORT_SPEED_100MBIT BIT(13) +#define PORT_AUTO_NEG_ENABLE BIT(12) +#define PORT_POWER_DOWN BIT(11) +#define PORT_ISOLATE BIT(10) +#define PORT_AUTO_NEG_RESTART BIT(9) +#define PORT_FULL_DUPLEX BIT(8) +#define PORT_COLLISION_TEST BIT(7) +#define PORT_SPEED_1000MBIT BIT(6) + +#define REG_PORT_PHY_STATUS 0x0102 + +#define PORT_100BT4_CAPABLE BIT(15) +#define PORT_100BTX_FD_CAPABLE BIT(14) +#define PORT_100BTX_CAPABLE BIT(13) +#define PORT_10BT_FD_CAPABLE BIT(12) +#define PORT_10BT_CAPABLE BIT(11) +#define PORT_EXTENDED_STATUS BIT(8) +#define PORT_MII_SUPPRESS_CAPABLE BIT(6) +#define PORT_AUTO_NEG_ACKNOWLEDGE BIT(5) +#define PORT_REMOTE_FAULT BIT(4) +#define PORT_AUTO_NEG_CAPABLE BIT(3) +#define PORT_LINK_STATUS BIT(2) +#define PORT_JABBER_DETECT BIT(1) +#define PORT_EXTENDED_CAPABILITY BIT(0) + +#define REG_PORT_PHY_ID_HI 0x0104 +#define REG_PORT_PHY_ID_LO 0x0106 + +#define KSZ9477_ID_HI 0x0022 +#define KSZ9477_ID_LO 0x1622 + +#define REG_PORT_PHY_AUTO_NEGOTIATION 0x0108 + +#define PORT_AUTO_NEG_NEXT_PAGE BIT(15) +#define PORT_AUTO_NEG_REMOTE_FAULT BIT(13) +#define PORT_AUTO_NEG_ASYM_PAUSE BIT(11) +#define PORT_AUTO_NEG_SYM_PAUSE BIT(10) +#define PORT_AUTO_NEG_100BT4 BIT(9) +#define PORT_AUTO_NEG_100BTX_FD BIT(8) +#define PORT_AUTO_NEG_100BTX BIT(7) +#define PORT_AUTO_NEG_10BT_FD BIT(6) +#define PORT_AUTO_NEG_10BT BIT(5) +#define PORT_AUTO_NEG_SELECTOR 0x001F +#define PORT_AUTO_NEG_802_3 0x0001 + +#define PORT_AUTO_NEG_PAUSE \ + (PORT_AUTO_NEG_ASYM_PAUSE | PORT_AUTO_NEG_SYM_PAUSE) + +#define REG_PORT_PHY_REMOTE_CAPABILITY 0x010A + +#define PORT_REMOTE_NEXT_PAGE BIT(15) +#define PORT_REMOTE_ACKNOWLEDGE BIT(14) +#define PORT_REMOTE_REMOTE_FAULT BIT(13) +#define PORT_REMOTE_ASYM_PAUSE BIT(11) +#define PORT_REMOTE_SYM_PAUSE BIT(10) +#define PORT_REMOTE_100BTX_FD BIT(8) +#define PORT_REMOTE_100BTX BIT(7) +#define PORT_REMOTE_10BT_FD BIT(6) +#define PORT_REMOTE_10BT BIT(5) + +#define REG_PORT_PHY_1000_CTRL 0x0112 + +#define PORT_AUTO_NEG_MANUAL BIT(12) +#define PORT_AUTO_NEG_MASTER BIT(11) +#define PORT_AUTO_NEG_MASTER_PREFERRED BIT(10) +#define PORT_AUTO_NEG_1000BT_FD BIT(9) +#define PORT_AUTO_NEG_1000BT BIT(8) + +#define REG_PORT_PHY_1000_STATUS 0x0114 + +#define PORT_MASTER_FAULT BIT(15) +#define PORT_LOCAL_MASTER BIT(14) +#define PORT_LOCAL_RX_OK BIT(13) +#define PORT_REMOTE_RX_OK BIT(12) +#define PORT_REMOTE_1000BT_FD BIT(11) +#define PORT_REMOTE_1000BT BIT(10) +#define PORT_REMOTE_IDLE_CNT_M 0x0F + +#define PORT_PHY_1000_STATIC_STATUS \ + (PORT_LOCAL_RX_OK | \ + PORT_REMOTE_RX_OK | \ + PORT_REMOTE_1000BT_FD | \ + PORT_REMOTE_1000BT) + +#define REG_PORT_PHY_MMD_SETUP 0x011A + +#define PORT_MMD_OP_MODE_M 0x3 +#define PORT_MMD_OP_MODE_S 14 +#define PORT_MMD_OP_INDEX 0 +#define PORT_MMD_OP_DATA_NO_INCR 1 +#define PORT_MMD_OP_DATA_INCR_RW 2 +#define PORT_MMD_OP_DATA_INCR_W 3 +#define PORT_MMD_DEVICE_ID_M 0x1F + +#define MMD_SETUP(mode, dev) \ + (((u16)(mode) << PORT_MMD_OP_MODE_S) | (dev)) + +#define REG_PORT_PHY_MMD_INDEX_DATA 0x011C + +#define MMD_DEVICE_ID_DSP 1 + +#define MMD_DSP_SQI_CHAN_A 0xAC +#define MMD_DSP_SQI_CHAN_B 0xAD +#define MMD_DSP_SQI_CHAN_C 0xAE +#define MMD_DSP_SQI_CHAN_D 0xAF + +#define DSP_SQI_ERR_DETECTED BIT(15) +#define DSP_SQI_AVG_ERR 0x7FFF + +#define MMD_DEVICE_ID_COMMON 2 + +#define MMD_DEVICE_ID_EEE_ADV 7 + +#define MMD_EEE_ADV 0x3C 
+#define EEE_ADV_100MBIT BIT(1) +#define EEE_ADV_1GBIT BIT(2) + +#define MMD_EEE_LP_ADV 0x3D +#define MMD_EEE_MSG_CODE 0x3F + +#define MMD_DEVICE_ID_AFED 0x1C + +#define REG_PORT_PHY_EXTENDED_STATUS 0x011E + +#define PORT_100BTX_FD_ABLE BIT(15) +#define PORT_100BTX_ABLE BIT(14) +#define PORT_10BT_FD_ABLE BIT(13) +#define PORT_10BT_ABLE BIT(12) + +#define REG_PORT_SGMII_ADDR__4 0x0200 +#define PORT_SGMII_AUTO_INCR BIT(23) +#define PORT_SGMII_DEVICE_ID_M 0x1F +#define PORT_SGMII_DEVICE_ID_S 16 +#define PORT_SGMII_ADDR_M (BIT(21) - 1) + +#define REG_PORT_SGMII_DATA__4 0x0204 +#define PORT_SGMII_DATA_M (BIT(16) - 1) + +#define MMD_DEVICE_ID_PMA 0x01 +#define MMD_DEVICE_ID_PCS 0x03 +#define MMD_DEVICE_ID_PHY_XS 0x04 +#define MMD_DEVICE_ID_DTE_XS 0x05 +#define MMD_DEVICE_ID_AN 0x07 +#define MMD_DEVICE_ID_VENDOR_CTRL 0x1E +#define MMD_DEVICE_ID_VENDOR_MII 0x1F + +#define SR_MII MMD_DEVICE_ID_VENDOR_MII + +#define MMD_SR_MII_CTRL 0x0000 + +#define SR_MII_RESET BIT(15) +#define SR_MII_LOOPBACK BIT(14) +#define SR_MII_SPEED_100MBIT BIT(13) +#define SR_MII_AUTO_NEG_ENABLE BIT(12) +#define SR_MII_POWER_DOWN BIT(11) +#define SR_MII_AUTO_NEG_RESTART BIT(9) +#define SR_MII_FULL_DUPLEX BIT(8) +#define SR_MII_SPEED_1000MBIT BIT(6) + +#define MMD_SR_MII_STATUS 0x0001 +#define MMD_SR_MII_ID_1 0x0002 +#define MMD_SR_MII_ID_2 0x0003 +#define MMD_SR_MII_AUTO_NEGOTIATION 0x0004 + +#define SR_MII_AUTO_NEG_NEXT_PAGE BIT(15) +#define SR_MII_AUTO_NEG_REMOTE_FAULT_M 0x3 +#define SR_MII_AUTO_NEG_REMOTE_FAULT_S 12 +#define SR_MII_AUTO_NEG_NO_ERROR 0 +#define SR_MII_AUTO_NEG_OFFLINE 1 +#define SR_MII_AUTO_NEG_LINK_FAILURE 2 +#define SR_MII_AUTO_NEG_ERROR 3 +#define SR_MII_AUTO_NEG_PAUSE_M 0x3 +#define SR_MII_AUTO_NEG_PAUSE_S 7 +#define SR_MII_AUTO_NEG_NO_PAUSE 0 +#define SR_MII_AUTO_NEG_ASYM_PAUSE_TX 1 +#define SR_MII_AUTO_NEG_SYM_PAUSE 2 +#define SR_MII_AUTO_NEG_ASYM_PAUSE_RX 3 +#define SR_MII_AUTO_NEG_HALF_DUPLEX BIT(6) +#define SR_MII_AUTO_NEG_FULL_DUPLEX BIT(5) + +#define MMD_SR_MII_REMOTE_CAPABILITY 0x0005 +#define MMD_SR_MII_AUTO_NEG_EXP 0x0006 +#define MMD_SR_MII_AUTO_NEG_EXT 0x000F + +#define MMD_SR_MII_DIGITAL_CTRL_1 0x8000 + +#define MMD_SR_MII_AUTO_NEG_CTRL 0x8001 + +#define SR_MII_8_BIT BIT(8) +#define SR_MII_SGMII_LINK_UP BIT(4) +#define SR_MII_TX_CFG_PHY_MASTER BIT(3) +#define SR_MII_PCS_MODE_M 0x3 +#define SR_MII_PCS_MODE_S 1 +#define SR_MII_PCS_SGMII 2 +#define SR_MII_AUTO_NEG_COMPLETE_INTR BIT(0) + +#define MMD_SR_MII_AUTO_NEG_STATUS 0x8002 + +#define SR_MII_STAT_LINK_UP BIT(4) +#define SR_MII_STAT_M 0x3 +#define SR_MII_STAT_S 2 +#define SR_MII_STAT_10_MBPS 0 +#define SR_MII_STAT_100_MBPS 1 +#define SR_MII_STAT_1000_MBPS 2 +#define SR_MII_STAT_FULL_DUPLEX BIT(1) + +#define MMD_SR_MII_PHY_CTRL 0x80A0 + +#define SR_MII_PHY_LANE_SEL_M 0xF +#define SR_MII_PHY_LANE_SEL_S 8 +#define SR_MII_PHY_WRITE BIT(1) +#define SR_MII_PHY_START_BUSY BIT(0) + +#define MMD_SR_MII_PHY_ADDR 0x80A1 + +#define SR_MII_PHY_ADDR_M (BIT(16) - 1) + +#define MMD_SR_MII_PHY_DATA 0x80A2 + +#define SR_MII_PHY_DATA_M (BIT(16) - 1) + +#define SR_MII_PHY_JTAG_CHIP_ID_HI 0x000C +#define SR_MII_PHY_JTAG_CHIP_ID_LO 0x000D + +#define REG_PORT_PHY_REMOTE_LB_LED 0x0122 + +#define PORT_REMOTE_LOOPBACK BIT(8) +#define PORT_LED_SELECT (3 << 6) +#define PORT_LED_CTRL (3 << 4) +#define PORT_LED_CTRL_TEST BIT(3) +#define PORT_10BT_PREAMBLE BIT(2) +#define PORT_LINK_MD_10BT_ENABLE BIT(1) +#define PORT_LINK_MD_PASS BIT(0) + +#define REG_PORT_PHY_LINK_MD 0x0124 + +#define PORT_START_CABLE_DIAG BIT(15) +#define PORT_TX_DISABLE BIT(14) +#define 
PORT_CABLE_DIAG_PAIR_M 0x3 +#define PORT_CABLE_DIAG_PAIR_S 12 +#define PORT_CABLE_DIAG_SELECT_M 0x3 +#define PORT_CABLE_DIAG_SELECT_S 10 +#define PORT_CABLE_DIAG_RESULT_M 0x3 +#define PORT_CABLE_DIAG_RESULT_S 8 +#define PORT_CABLE_STAT_NORMAL 0 +#define PORT_CABLE_STAT_OPEN 1 +#define PORT_CABLE_STAT_SHORT 2 +#define PORT_CABLE_STAT_FAILED 3 +#define PORT_CABLE_FAULT_COUNTER 0x00FF + +#define REG_PORT_PHY_PMA_STATUS 0x0126 + +#define PORT_1000_LINK_GOOD BIT(1) +#define PORT_100_LINK_GOOD BIT(0) + +#define REG_PORT_PHY_DIGITAL_STATUS 0x0128 + +#define PORT_LINK_DETECT BIT(14) +#define PORT_SIGNAL_DETECT BIT(13) +#define PORT_PHY_STAT_MDI BIT(12) +#define PORT_PHY_STAT_MASTER BIT(11) + +#define REG_PORT_PHY_RXER_COUNTER 0x012A + +#define REG_PORT_PHY_INT_ENABLE 0x0136 +#define REG_PORT_PHY_INT_STATUS 0x0137 + +#define JABBER_INT BIT(7) +#define RX_ERR_INT BIT(6) +#define PAGE_RX_INT BIT(5) +#define PARALLEL_DETECT_FAULT_INT BIT(4) +#define LINK_PARTNER_ACK_INT BIT(3) +#define LINK_DOWN_INT BIT(2) +#define REMOTE_FAULT_INT BIT(1) +#define LINK_UP_INT BIT(0) + +#define REG_PORT_PHY_DIGITAL_DEBUG_1 0x0138 + +#define PORT_REG_CLK_SPEED_25_MHZ BIT(14) +#define PORT_PHY_FORCE_MDI BIT(7) +#define PORT_PHY_AUTO_MDIX_DISABLE BIT(6) + +/* Same as PORT_PHY_LOOPBACK */ +#define PORT_PHY_PCS_LOOPBACK BIT(0) + +#define REG_PORT_PHY_DIGITAL_DEBUG_2 0x013A + +#define REG_PORT_PHY_DIGITAL_DEBUG_3 0x013C + +#define PORT_100BT_FIXED_LATENCY BIT(15) + +#define REG_PORT_PHY_PHY_CTRL 0x013E + +#define PORT_INT_PIN_HIGH BIT(14) +#define PORT_ENABLE_JABBER BIT(9) +#define PORT_STAT_SPEED_1000MBIT BIT(6) +#define PORT_STAT_SPEED_100MBIT BIT(5) +#define PORT_STAT_SPEED_10MBIT BIT(4) +#define PORT_STAT_FULL_DUPLEX BIT(3) + +/* Same as PORT_PHY_STAT_MASTER */ +#define PORT_STAT_MASTER BIT(2) +#define PORT_RESET BIT(1) +#define PORT_LINK_STATUS_FAIL BIT(0) + +/* 3 - xMII */ +#define REG_PORT_XMII_CTRL_0 0x0300 + +#define PORT_SGMII_SEL BIT(7) +#define PORT_MII_FULL_DUPLEX BIT(6) +#define PORT_MII_100MBIT BIT(4) +#define PORT_GRXC_ENABLE BIT(0) + +#define REG_PORT_XMII_CTRL_1 0x0301 + +#define PORT_RMII_CLK_SEL BIT(7) +/* S1 */ +#define PORT_MII_1000MBIT_S1 BIT(6) +/* S2 */ +#define PORT_MII_NOT_1GBIT BIT(6) +#define PORT_MII_SEL_EDGE BIT(5) +#define PORT_RGMII_ID_IG_ENABLE BIT(4) +#define PORT_RGMII_ID_EG_ENABLE BIT(3) +#define PORT_MII_MAC_MODE BIT(2) +#define PORT_MII_SEL_M 0x3 +/* S1 */ +#define PORT_MII_SEL_S1 0x0 +#define PORT_RMII_SEL_S1 0x1 +#define PORT_GMII_SEL_S1 0x2 +#define PORT_RGMII_SEL_S1 0x3 +/* S2 */ +#define PORT_RGMII_SEL 0x0 +#define PORT_RMII_SEL 0x1 +#define PORT_GMII_SEL 0x2 +#define PORT_MII_SEL 0x3 + +/* 4 - MAC */ +#define REG_PORT_MAC_CTRL_0 0x0400 + +#define PORT_BROADCAST_STORM BIT(1) +#define PORT_JUMBO_FRAME BIT(0) + +#define REG_PORT_MAC_CTRL_1 0x0401 + +#define PORT_BACK_PRESSURE BIT(3) +#define PORT_PASS_ALL BIT(0) + +#define REG_PORT_MAC_CTRL_2 0x0402 + +#define PORT_100BT_EEE_DISABLE BIT(7) +#define PORT_1000BT_EEE_DISABLE BIT(6) + +#define REG_PORT_MAC_IN_RATE_LIMIT 0x0403 + +#define PORT_IN_PORT_BASED_S 6 +#define PORT_RATE_PACKET_BASED_S 5 +#define PORT_IN_FLOW_CTRL_S 4 +#define PORT_COUNT_IFG_S 1 +#define PORT_COUNT_PREAMBLE_S 0 +#define PORT_IN_PORT_BASED BIT(6) +#define PORT_IN_PACKET_BASED BIT(5) +#define PORT_IN_FLOW_CTRL BIT(4) +#define PORT_IN_LIMIT_MODE_M 0x3 +#define PORT_IN_LIMIT_MODE_S 2 +#define PORT_IN_ALL 0 +#define PORT_IN_UNICAST 1 +#define PORT_IN_MULTICAST 2 +#define PORT_IN_BROADCAST 3 +#define PORT_COUNT_IFG BIT(1) +#define PORT_COUNT_PREAMBLE BIT(0) + +#define 
REG_PORT_IN_RATE_0 0x0410 +#define REG_PORT_IN_RATE_1 0x0411 +#define REG_PORT_IN_RATE_2 0x0412 +#define REG_PORT_IN_RATE_3 0x0413 +#define REG_PORT_IN_RATE_4 0x0414 +#define REG_PORT_IN_RATE_5 0x0415 +#define REG_PORT_IN_RATE_6 0x0416 +#define REG_PORT_IN_RATE_7 0x0417 + +#define REG_PORT_OUT_RATE_0 0x0420 +#define REG_PORT_OUT_RATE_1 0x0421 +#define REG_PORT_OUT_RATE_2 0x0422 +#define REG_PORT_OUT_RATE_3 0x0423 + +#define PORT_RATE_LIMIT_M (BIT(7) - 1) + +/* 5 - MIB Counters */ +#define REG_PORT_MIB_CTRL_STAT__4 0x0500 + +#define MIB_COUNTER_OVERFLOW BIT(31) +#define MIB_COUNTER_VALID BIT(30) +#define MIB_COUNTER_READ BIT(25) +#define MIB_COUNTER_FLUSH_FREEZE BIT(24) +#define MIB_COUNTER_INDEX_M (BIT(8) - 1) +#define MIB_COUNTER_INDEX_S 16 +#define MIB_COUNTER_DATA_HI_M 0xF + +#define REG_PORT_MIB_DATA 0x0504 + +/* 6 - ACL */ +#define REG_PORT_ACL_0 0x0600 + +#define ACL_FIRST_RULE_M 0xF + +#define REG_PORT_ACL_1 0x0601 + +#define ACL_MODE_M 0x3 +#define ACL_MODE_S 4 +#define ACL_MODE_DISABLE 0 +#define ACL_MODE_LAYER_2 1 +#define ACL_MODE_LAYER_3 2 +#define ACL_MODE_LAYER_4 3 +#define ACL_ENABLE_M 0x3 +#define ACL_ENABLE_S 2 +#define ACL_ENABLE_2_COUNT 0 +#define ACL_ENABLE_2_TYPE 1 +#define ACL_ENABLE_2_MAC 2 +#define ACL_ENABLE_2_BOTH 3 +#define ACL_ENABLE_3_IP 1 +#define ACL_ENABLE_3_SRC_DST_COMP 2 +#define ACL_ENABLE_4_PROTOCOL 0 +#define ACL_ENABLE_4_TCP_PORT_COMP 1 +#define ACL_ENABLE_4_UDP_PORT_COMP 2 +#define ACL_ENABLE_4_TCP_SEQN_COMP 3 +#define ACL_SRC BIT(1) +#define ACL_EQUAL BIT(0) + +#define REG_PORT_ACL_2 0x0602 +#define REG_PORT_ACL_3 0x0603 + +#define ACL_MAX_PORT 0xFFFF + +#define REG_PORT_ACL_4 0x0604 +#define REG_PORT_ACL_5 0x0605 + +#define ACL_MIN_PORT 0xFFFF +#define ACL_IP_ADDR 0xFFFFFFFF +#define ACL_TCP_SEQNUM 0xFFFFFFFF + +#define REG_PORT_ACL_6 0x0606 + +#define ACL_RESERVED 0xF8 +#define ACL_PORT_MODE_M 0x3 +#define ACL_PORT_MODE_S 1 +#define ACL_PORT_MODE_DISABLE 0 +#define ACL_PORT_MODE_EITHER 1 +#define ACL_PORT_MODE_IN_RANGE 2 +#define ACL_PORT_MODE_OUT_OF_RANGE 3 + +#define REG_PORT_ACL_7 0x0607 + +#define ACL_TCP_FLAG_ENABLE BIT(0) + +#define REG_PORT_ACL_8 0x0608 + +#define ACL_TCP_FLAG_M 0xFF + +#define REG_PORT_ACL_9 0x0609 + +#define ACL_TCP_FLAG 0xFF +#define ACL_ETH_TYPE 0xFFFF +#define ACL_IP_M 0xFFFFFFFF + +#define REG_PORT_ACL_A 0x060A + +#define ACL_PRIO_MODE_M 0x3 +#define ACL_PRIO_MODE_S 6 +#define ACL_PRIO_MODE_DISABLE 0 +#define ACL_PRIO_MODE_HIGHER 1 +#define ACL_PRIO_MODE_LOWER 2 +#define ACL_PRIO_MODE_REPLACE 3 +#define ACL_PRIO_M KS_PRIO_M +#define ACL_PRIO_S 3 +#define ACL_VLAN_PRIO_REPLACE BIT(2) +#define ACL_VLAN_PRIO_M KS_PRIO_M +#define ACL_VLAN_PRIO_HI_M 0x3 + +#define REG_PORT_ACL_B 0x060B + +#define ACL_VLAN_PRIO_LO_M 0x8 +#define ACL_VLAN_PRIO_S 7 +#define ACL_MAP_MODE_M 0x3 +#define ACL_MAP_MODE_S 5 +#define ACL_MAP_MODE_DISABLE 0 +#define ACL_MAP_MODE_OR 1 +#define ACL_MAP_MODE_AND 2 +#define ACL_MAP_MODE_REPLACE 3 + +#define ACL_CNT_M (BIT(11) - 1) +#define ACL_CNT_S 5 + +#define REG_PORT_ACL_C 0x060C + +#define REG_PORT_ACL_D 0x060D +#define ACL_MSEC_UNIT BIT(6) +#define ACL_INTR_MODE BIT(5) +#define ACL_PORT_MAP 0x7F + +#define REG_PORT_ACL_E 0x060E +#define REG_PORT_ACL_F 0x060F + +#define REG_PORT_ACL_BYTE_EN_MSB 0x0610 +#define REG_PORT_ACL_BYTE_EN_LSB 0x0611 + +#define ACL_ACTION_START 0xA +#define ACL_ACTION_LEN 4 +#define ACL_INTR_CNT_START 0xD +#define ACL_RULESET_START 0xE +#define ACL_RULESET_LEN 2 +#define ACL_TABLE_LEN 16 + +#define ACL_ACTION_ENABLE 0x003C +#define ACL_MATCH_ENABLE 0x7FC3 +#define 
ACL_RULESET_ENABLE 0x8003 +#define ACL_BYTE_ENABLE 0xFFFF + +#define REG_PORT_ACL_CTRL_0 0x0612 + +#define PORT_ACL_WRITE_DONE BIT(6) +#define PORT_ACL_READ_DONE BIT(5) +#define PORT_ACL_WRITE BIT(4) +#define PORT_ACL_INDEX_M 0xF + +#define REG_PORT_ACL_CTRL_1 0x0613 + +/* 8 - Classification and Policing */ +#define REG_PORT_MRI_MIRROR_CTRL 0x0800 + +#define PORT_MIRROR_RX BIT(6) +#define PORT_MIRROR_TX BIT(5) +#define PORT_MIRROR_SNIFFER BIT(1) + +#define REG_PORT_MRI_PRIO_CTRL 0x0801 + +#define PORT_HIGHEST_PRIO BIT(7) +#define PORT_OR_PRIO BIT(6) +#define PORT_MAC_PRIO_ENABLE BIT(4) +#define PORT_VLAN_PRIO_ENABLE BIT(3) +#define PORT_802_1P_PRIO_ENABLE BIT(2) +#define PORT_DIFFSERV_PRIO_ENABLE BIT(1) +#define PORT_ACL_PRIO_ENABLE BIT(0) + +#define REG_PORT_MRI_MAC_CTRL 0x0802 + +#define PORT_USER_PRIO_CEILING BIT(7) +#define PORT_DROP_NON_VLAN BIT(4) +#define PORT_DROP_TAG BIT(3) +#define PORT_BASED_PRIO_M KS_PRIO_M +#define PORT_BASED_PRIO_S 0 + +#define REG_PORT_MRI_AUTHEN_CTRL 0x0803 + +#define PORT_ACL_ENABLE BIT(2) +#define PORT_AUTHEN_MODE 0x3 +#define PORT_AUTHEN_PASS 0 +#define PORT_AUTHEN_BLOCK 1 +#define PORT_AUTHEN_TRAP 2 + +#define REG_PORT_MRI_INDEX__4 0x0804 + +#define MRI_INDEX_P_M 0x7 +#define MRI_INDEX_P_S 16 +#define MRI_INDEX_Q_M 0x3 +#define MRI_INDEX_Q_S 0 + +#define REG_PORT_MRI_TC_MAP__4 0x0808 + +#define PORT_TC_MAP_M 0xf +#define PORT_TC_MAP_S 4 + +#define REG_PORT_MRI_POLICE_CTRL__4 0x080C + +#define POLICE_DROP_ALL BIT(10) +#define POLICE_PACKET_TYPE_M 0x3 +#define POLICE_PACKET_TYPE_S 8 +#define POLICE_PACKET_DROPPED 0 +#define POLICE_PACKET_GREEN 1 +#define POLICE_PACKET_YELLOW 2 +#define POLICE_PACKET_RED 3 +#define PORT_BASED_POLICING BIT(7) +#define NON_DSCP_COLOR_M 0x3 +#define NON_DSCP_COLOR_S 5 +#define COLOR_MARK_ENABLE BIT(4) +#define COLOR_REMAP_ENABLE BIT(3) +#define POLICE_DROP_SRP BIT(2) +#define POLICE_COLOR_NOT_AWARE BIT(1) +#define POLICE_ENABLE BIT(0) + +#define REG_PORT_POLICE_COLOR_0__4 0x0810 +#define REG_PORT_POLICE_COLOR_1__4 0x0814 +#define REG_PORT_POLICE_COLOR_2__4 0x0818 +#define REG_PORT_POLICE_COLOR_3__4 0x081C + +#define POLICE_COLOR_MAP_S 2 +#define POLICE_COLOR_MAP_M (BIT(POLICE_COLOR_MAP_S) - 1) + +#define REG_PORT_POLICE_RATE__4 0x0820 + +#define POLICE_CIR_S 16 +#define POLICE_PIR_S 0 + +#define REG_PORT_POLICE_BURST_SIZE__4 0x0824 + +#define POLICE_BURST_SIZE_M 0x3FFF +#define POLICE_CBS_S 16 +#define POLICE_PBS_S 0 + +#define REG_PORT_WRED_PM_CTRL_0__4 0x0830 + +#define WRED_PM_CTRL_M (BIT(11) - 1) + +#define WRED_PM_MAX_THRESHOLD_S 16 +#define WRED_PM_MIN_THRESHOLD_S 0 + +#define REG_PORT_WRED_PM_CTRL_1__4 0x0834 + +#define WRED_PM_MULTIPLIER_S 16 +#define WRED_PM_AVG_QUEUE_SIZE_S 0 + +#define REG_PORT_WRED_QUEUE_CTRL_0__4 0x0840 +#define REG_PORT_WRED_QUEUE_CTRL_1__4 0x0844 + +#define REG_PORT_WRED_QUEUE_PMON__4 0x0848 + +#define WRED_RANDOM_DROP_ENABLE BIT(31) +#define WRED_PMON_FLUSH BIT(30) +#define WRED_DROP_GYR_DISABLE BIT(29) +#define WRED_DROP_YR_DISABLE BIT(28) +#define WRED_DROP_R_DISABLE BIT(27) +#define WRED_DROP_ALL BIT(26) +#define WRED_PMON_M (BIT(24) - 1) + +/* 9 - Shaping */ + +#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900 + +#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904 + +#define MTI_PVID_REPLACE BIT(0) + +#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914 + +#define MTI_SCHEDULE_MODE_M 0x3 +#define MTI_SCHEDULE_MODE_S 6 +#define MTI_SCHEDULE_STRICT_PRIO 0 +#define MTI_SCHEDULE_WRR 2 +#define MTI_SHAPING_M 0x3 +#define MTI_SHAPING_S 4 +#define MTI_SHAPING_OFF 0 +#define MTI_SHAPING_SRP 1 +#define MTI_SHAPING_TIME_AWARE 
2 + +#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915 + +#define MTI_TX_RATIO_M (BIT(7) - 1) + +#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916 +#define REG_PORT_MTI_HI_WATER_MARK 0x0916 +#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918 +#define REG_PORT_MTI_LO_WATER_MARK 0x0918 +#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A +#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A + +/* A - QM */ + +#define REG_PORT_QM_CTRL__4 0x0A00 + +#define PORT_QM_DROP_PRIO_M 0x3 + +#define REG_PORT_VLAN_MEMBERSHIP__4 0x0A04 + +#define REG_PORT_QM_QUEUE_INDEX__4 0x0A08 + +#define PORT_QM_QUEUE_INDEX_S 24 +#define PORT_QM_BURST_SIZE_S 16 +#define PORT_QM_MIN_RESV_SPACE_M (BIT(11) - 1) + +#define REG_PORT_QM_WATER_MARK__4 0x0A0C + +#define PORT_QM_HI_WATER_MARK_S 16 +#define PORT_QM_LO_WATER_MARK_S 0 +#define PORT_QM_WATER_MARK_M (BIT(11) - 1) + +#define REG_PORT_QM_TX_CNT_0__4 0x0A10 + +#define PORT_QM_TX_CNT_USED_S 0 +#define PORT_QM_TX_CNT_M (BIT(11) - 1) + +#define REG_PORT_QM_TX_CNT_1__4 0x0A14 + +#define PORT_QM_TX_CNT_CALCULATED_S 16 +#define PORT_QM_TX_CNT_AVAIL_S 0 + +/* B - LUE */ +#define REG_PORT_LUE_CTRL 0x0B00 + +#define PORT_VLAN_LOOKUP_VID_0 BIT(7) +#define PORT_INGRESS_FILTER BIT(6) +#define PORT_DISCARD_NON_VID BIT(5) +#define PORT_MAC_BASED_802_1X BIT(4) +#define PORT_SRC_ADDR_FILTER BIT(3) + +#define REG_PORT_LUE_MSTP_INDEX 0x0B01 + +#define REG_PORT_LUE_MSTP_STATE 0x0B04 + +#define PORT_TX_ENABLE BIT(2) +#define PORT_RX_ENABLE BIT(1) +#define PORT_LEARN_DISABLE BIT(0) + +/* C - PTP */ + +#define REG_PTP_PORT_RX_DELAY__2 0x0C00 +#define REG_PTP_PORT_TX_DELAY__2 0x0C02 +#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04 + +#define REG_PTP_PORT_XDELAY_TS 0x0C08 +#define REG_PTP_PORT_XDELAY_TS_H 0x0C08 +#define REG_PTP_PORT_XDELAY_TS_L 0x0C0A + +#define REG_PTP_PORT_SYNC_TS 0x0C0C +#define REG_PTP_PORT_SYNC_TS_H 0x0C0C +#define REG_PTP_PORT_SYNC_TS_L 0x0C0E + +#define REG_PTP_PORT_PDRESP_TS 0x0C10 +#define REG_PTP_PORT_PDRESP_TS_H 0x0C10 +#define REG_PTP_PORT_PDRESP_TS_L 0x0C12 + +#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14 +#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16 + +#define PTP_PORT_SYNC_INT BIT(15) +#define PTP_PORT_XDELAY_REQ_INT BIT(14) +#define PTP_PORT_PDELAY_RESP_INT BIT(13) + +#define REG_PTP_PORT_LINK_DELAY__4 0x0C18 + +#define PRIO_QUEUES 4 +#define RX_PRIO_QUEUES 8 + +#define KS_PRIO_IN_REG 2 + +#define TOTAL_PORT_NUM 7 + +#define KSZ9477_COUNTER_NUM 0x20 +#define TOTAL_KSZ9477_COUNTER_NUM (KSZ9477_COUNTER_NUM + 2 + 2) + +#define SWITCH_COUNTER_NUM KSZ9477_COUNTER_NUM +#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ9477_COUNTER_NUM + +#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0 +#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL +#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL +#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE +#define P_PHY_CTRL REG_PORT_PHY_CTRL +#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL +#define P_LINK_STATUS REG_PORT_PHY_STATUS +#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL +#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT + +#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1 +#define S_MIRROR_CTRL REG_SW_MRI_CTRL_0 +#define S_REPLACE_VID_CTRL REG_SW_MAC_CTRL_2 +#define S_802_1P_PRIO_CTRL REG_SW_MAC_802_1P_MAP_0 +#define S_TOS_PRIO_CTRL REG_SW_MAC_TOS_PRIO_0 +#define S_FLUSH_TABLE_CTRL REG_SW_LUE_CTRL_1 + +#define SW_FLUSH_DYN_MAC_TABLE SW_FLUSH_MSTP_TABLE + +#define MAX_TIMESTAMP_UNIT 2 +#define MAX_TRIG_UNIT 3 +#define MAX_TIMESTAMP_EVENT_UNIT 8 +#define MAX_GPIO 4 + +#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1) +#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1) + +/* The driver sets the switch broadcast storm protection rate to 10%. */ +#define BROADCAST_STORM_PROT_RATE 10 + +/* 148,800 frames/s (100 Mbit/s wire rate with minimum-size frames) * 67 ms ~= 9969, the 100% reference count per interval; it is scaled by the protection rate / 100 when programmed */ +#define BROADCAST_STORM_VALUE 9969 + +#endif /* KSZ9477_REGS_H */ diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c new file mode 100644 index 000000000000..b313ecdf2919 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -0,0 +1,1279 @@ +/* + * Microchip switch driver main logic + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ksz_priv.h" + +static const struct { + int index; + char string[ETH_GSTRING_LEN]; +} mib_names[TOTAL_SWITCH_COUNTER_NUM] = { + { 0x00, "rx_hi" }, + { 0x01, "rx_undersize" }, + { 0x02, "rx_fragments" }, + { 0x03, "rx_oversize" }, + { 0x04, "rx_jabbers" }, + { 0x05, "rx_symbol_err" }, + { 0x06, "rx_crc_err" }, + { 0x07, "rx_align_err" }, + { 0x08, "rx_mac_ctrl" }, + { 0x09, "rx_pause" }, + { 0x0A, "rx_bcast" }, + { 0x0B, "rx_mcast" }, + { 0x0C, "rx_ucast" }, + { 0x0D, "rx_64_or_less" }, + { 0x0E, "rx_65_127" }, + { 0x0F, "rx_128_255" }, + { 0x10, "rx_256_511" }, + { 0x11, "rx_512_1023" }, + { 0x12, "rx_1024_1522" }, + { 0x13, "rx_1523_2000" }, + { 0x14, "rx_2001" }, + { 0x15, "tx_hi" }, + { 0x16, "tx_late_col" }, + { 0x17, "tx_pause" }, + { 0x18, "tx_bcast" }, + { 0x19, "tx_mcast" }, + { 0x1A, "tx_ucast" }, + { 0x1B, "tx_deferred" }, + { 0x1C, "tx_total_col" }, + { 0x1D, "tx_exc_col" }, + { 0x1E, "tx_single_col" }, + { 0x1F, "tx_mult_col" }, + { 0x80, "rx_total" }, + { 0x81, "tx_total" }, + { 0x82, "rx_discards" }, + { 0x83, "tx_discards" }, +}; + +static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) +{ + u8 data; + + ksz_read8(dev, addr, &data); + if (set) + data |= bits; + else + data &= ~bits; + ksz_write8(dev, addr, data); +} + +static void ksz_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set) +{ + u32 data; + + ksz_read32(dev, addr, &data); + if (set) + data |= bits; + else + data &= ~bits; + ksz_write32(dev, addr, data); +} + +static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, + bool set) +{ + u32 addr; + u8 data; + + addr = PORT_CTRL_ADDR(port, offset); + ksz_read8(dev, addr, &data); + + if (set) + data |= bits; + else + data &= ~bits; + + ksz_write8(dev, addr, data); +} + +static void ksz_port_cfg32(struct ksz_device *dev, int port, int offset, + u32 bits, bool set) +{ + u32 addr; + u32 data; + + addr = PORT_CTRL_ADDR(port, offset); + ksz_read32(dev, addr, &data); + + if (set) + data |= bits; + else + data &= ~bits; + + ksz_write32(dev, addr, data); +} + +static int wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, int timeout) +{ + u8 data; + + do { + ksz_read8(dev, REG_SW_VLAN_CTRL, &data); + if 
(!(data & waiton)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (timeout <= 0) + return -ETIMEDOUT; + + return 0; +} + +static int get_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table) +{ + struct ksz_device *dev = ds->priv; + int ret; + + mutex_lock(&dev->vlan_mutex); + + ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M); + ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START); + + /* wait to be cleared */ + ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read vlan table\n"); + goto exit; + } + + ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]); + ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]); + ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]); + + ksz_write8(dev, REG_SW_VLAN_CTRL, 0); + +exit: + mutex_unlock(&dev->vlan_mutex); + + return ret; +} + +static int set_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table) +{ + struct ksz_device *dev = ds->priv; + int ret; + + mutex_lock(&dev->vlan_mutex); + + ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]); + ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]); + ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]); + + ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M); + ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE); + + /* wait to be cleared */ + ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to write vlan table\n"); + goto exit; + } + + ksz_write8(dev, REG_SW_VLAN_CTRL, 0); + + /* update vlan cache table */ + dev->vlan_cache[vid].table[0] = vlan_table[0]; + dev->vlan_cache[vid].table[1] = vlan_table[1]; + dev->vlan_cache[vid].table[2] = vlan_table[2]; + +exit: + mutex_unlock(&dev->vlan_mutex); + + return ret; +} + +static void read_table(struct dsa_switch *ds, u32 *table) +{ + struct ksz_device *dev = ds->priv; + + ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]); + ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]); + ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]); + ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]); +} + +static void write_table(struct dsa_switch *ds, u32 *table) +{ + struct ksz_device *dev = ds->priv; + + ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]); + ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]); + ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]); + ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]); +} + +static int wait_alu_ready(struct ksz_device *dev, u32 waiton, int timeout) +{ + u32 data; + + do { + ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); + if (!(data & waiton)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (timeout <= 0) + return -ETIMEDOUT; + + return 0; +} + +static int wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, int timeout) +{ + u32 data; + + do { + ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data); + if (!(data & waiton)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (timeout <= 0) + return -ETIMEDOUT; + + return 0; +} + +static int ksz_reset_switch(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + u8 data8; + u16 data16; + u32 data32; + + /* reset switch */ + ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true); + + /* turn off SPI DO Edge select */ + ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); + data8 &= ~SPI_AUTO_EDGE_DETECTION; + ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); + + /* default configuration */ + ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); + data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING | + SW_SRC_ADDR_FILTER | 
SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE; + ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + + /* disable interrupts */ + ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK); + ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F); + ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32); + + /* set broadcast storm protection 10% rate */ + ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16); + data16 &= ~BROADCAST_STORM_RATE; + data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100; + ksz_write16(dev, REG_SW_MAC_CTRL_2, data16); + + return 0; +} + +static void port_setup(struct ksz_device *dev, int port, bool cpu_port) +{ + u8 data8; + u16 data16; + + /* enable tag tail for host port */ + if (cpu_port) + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE, + true); + + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false); + + /* set back pressure */ + ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true); + + /* set flow control */ + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, + PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, true); + + /* enable broadcast storm limit */ + ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); + + /* disable DiffServ priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false); + + /* replace priority */ + ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING, + false); + ksz_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4, + MTI_PVID_REPLACE, false); + + /* enable 802.1p priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true); + + /* configure MAC to 1G & RGMII mode */ + ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8); + data8 |= PORT_RGMII_ID_EG_ENABLE; + data8 &= ~PORT_MII_NOT_1GBIT; + data8 &= ~PORT_MII_SEL_M; + data8 |= PORT_RGMII_SEL; + ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); + + /* clear pending interrupts */ + ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16); +} + +static void ksz_config_cpu_port(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + int i; + + ds->num_ports = dev->port_cnt; + + for (i = 0; i < ds->num_ports; i++) { + if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) { + dev->cpu_port = i; + + /* enable cpu port */ + port_setup(dev, i, true); + } + } +} + +static int ksz_setup(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + int ret = 0; + + dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), + dev->num_vlans, GFP_KERNEL); + if (!dev->vlan_cache) + return -ENOMEM; + + ret = ksz_reset_switch(ds); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); + return ret; + } + + /* accept packet up to 2000bytes */ + ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true); + + ksz_config_cpu_port(ds); + + ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true); + + /* queue based egress rate limit */ + ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true); + + /* start switch */ + ksz_cfg(dev, REG_SW_OPERATION, SW_START, true); + + return 0; +} + +static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds) +{ + return DSA_TAG_PROTO_KSZ; +} + +static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg) +{ + struct ksz_device *dev = ds->priv; + u16 val = 0; + + ksz_pread16(dev, addr, 0x100 + (reg << 1), &val); + + return val; +} + +static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) +{ + struct ksz_device *dev = ds->priv; + + ksz_pwrite16(dev, addr, 0x100 + (reg << 
1), val); + + return 0; +} + +static int ksz_enable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct ksz_device *dev = ds->priv; + + /* setup slave port */ + port_setup(dev, port, false); + + return 0; +} + +static void ksz_disable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct ksz_device *dev = ds->priv; + + /* there is no port disable */ + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); +} + +static int ksz_sset_count(struct dsa_switch *ds) +{ + return TOTAL_SWITCH_COUNTER_NUM; +} + +static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf) +{ + int i; + + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, + ETH_GSTRING_LEN); + } +} + +static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *buf) +{ + struct ksz_device *dev = ds->priv; + int i; + u32 data; + int timeout; + + mutex_lock(&dev->stats_mutex); + + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + data = MIB_COUNTER_READ; + data |= ((mib_names[i].index & 0xFF) << MIB_COUNTER_INDEX_S); + ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data); + + timeout = 1000; + do { + ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4, + &data); + usleep_range(1, 10); + if (!(data & MIB_COUNTER_READ)) + break; + } while (timeout-- > 0); + + /* failed to read MIB. get out of loop */ + if (!timeout) { + dev_dbg(dev->dev, "Failed to get MIB\n"); + break; + } + + /* count resets upon read */ + ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data); + + dev->mib_value[i] += (uint64_t)data; + buf[i] = dev->mib_value[i]; + } + + mutex_unlock(&dev->stats_mutex); +} + +static void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct ksz_device *dev = ds->priv; + u8 data; + + ksz_pread8(dev, port, P_STP_CTRL, &data); + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); + + switch (state) { + case BR_STATE_DISABLED: + data |= PORT_LEARN_DISABLE; + break; + case BR_STATE_LISTENING: + data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); + break; + case BR_STATE_LEARNING: + data |= PORT_RX_ENABLE; + break; + case BR_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + break; + case BR_STATE_BLOCKING: + data |= PORT_LEARN_DISABLE; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + ksz_pwrite8(dev, port, P_STP_CTRL, data); +} + +static void ksz_port_fast_age(struct dsa_switch *ds, int port) +{ + struct ksz_device *dev = ds->priv; + u8 data8; + + ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); + data8 |= SW_FAST_AGING; + ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + + data8 &= ~SW_FAST_AGING; + ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); +} + +static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag) +{ + struct ksz_device *dev = ds->priv; + + if (flag) { + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, + PORT_VLAN_LOOKUP_VID_0, true); + ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true); + ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true); + } else { + ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false); + ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, false); + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, + PORT_VLAN_LOOKUP_VID_0, false); + } + + return 0; +} + +static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + /* nothing needed */ + + return 0; +} + +static void 
ksz_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct ksz_device *dev = ds->priv; + u32 vlan_table[3]; + u16 vid; + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + if (get_vlan_table(ds, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to get vlan table\n"); + return; + } + + vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M); + if (untagged) + vlan_table[1] |= BIT(port); + else + vlan_table[1] &= ~BIT(port); + vlan_table[1] &= ~(BIT(dev->cpu_port)); + + vlan_table[2] |= BIT(port) | BIT(dev->cpu_port); + + if (set_vlan_table(ds, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to set vlan table\n"); + return; + } + + /* change PVID */ + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid); + } +} + +static int ksz_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ksz_device *dev = ds->priv; + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + u32 vlan_table[3]; + u16 vid; + u16 pvid; + + ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid); + pvid = pvid & 0xFFF; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + if (get_vlan_table(ds, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to get vlan table\n"); + return -ETIMEDOUT; + } + + vlan_table[2] &= ~BIT(port); + + if (pvid == vid) + pvid = 1; + + if (untagged) + vlan_table[1] &= ~BIT(port); + + if (set_vlan_table(ds, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to set vlan table\n"); + return -ETIMEDOUT; + } + } + + ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid); + + return 0; +} + +static int ksz_port_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + struct ksz_device *dev = ds->priv; + u16 vid; + u16 data; + struct vlan_table *vlan_cache; + int err = 0; + + mutex_lock(&dev->vlan_mutex); + + /* use dev->vlan_cache due to lack of searching valid vlan entry */ + for (vid = vlan->vid_begin; vid < dev->num_vlans; vid++) { + vlan_cache = &dev->vlan_cache[vid]; + + if (!(vlan_cache->table[0] & VLAN_VALID)) + continue; + + vlan->vid_begin = vid; + vlan->vid_end = vid; + vlan->flags = 0; + if (vlan_cache->table[2] & BIT(port)) { + if (vlan_cache->table[1] & BIT(port)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &data); + if (vid == (data & 0xFFFFF)) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } + } + + mutex_unlock(&dev->vlan_mutex); + + return err; +} + +static int ksz_port_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + /* nothing needed */ + + return 0; +} + +struct alu_struct { + /* entry 1 */ + u8 is_static:1; + u8 is_src_filter:1; + u8 is_dst_filter:1; + u8 prio_age:3; + u32 _reserv_0_1:23; + u8 mstp:3; + /* entry 2 */ + u8 is_override:1; + u8 is_use_fid:1; + u32 _reserv_1_1:23; + u8 port_forward:7; + /* entry 3 & 4*/ + u32 _reserv_2_1:9; + u8 fid:7; + u8 mac[ETH_ALEN]; +}; + +static void ksz_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct ksz_device *dev = ds->priv; + u32 alu_table[4]; + u32 data; + + mutex_lock(&dev->alu_mutex); + + /* find any entry with mac & vid */ + data = fdb->vid << ALU_FID_INDEX_S; + data |= ((fdb->addr[0] << 8) | 
fdb->addr[1]); + ksz_write32(dev, REG_SW_ALU_INDEX_0, data); + + data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); + data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + ksz_write32(dev, REG_SW_ALU_INDEX_1, data); + + /* start read operation */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); + + /* wait to be finished */ + if (wait_alu_ready(dev, ALU_START, 1000) < 0) { + dev_dbg(dev->dev, "Failed to read ALU\n"); + goto exit; + } + + /* read ALU entry */ + read_table(ds, alu_table); + + /* update ALU entry */ + alu_table[0] = ALU_V_STATIC_VALID; + alu_table[1] |= BIT(port); + if (fdb->vid) + alu_table[1] |= ALU_V_USE_FID; + alu_table[2] = (fdb->vid << ALU_V_FID_S); + alu_table[2] |= ((fdb->addr[0] << 8) | fdb->addr[1]); + alu_table[3] = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); + alu_table[3] |= ((fdb->addr[4] << 8) | fdb->addr[5]); + + write_table(ds, alu_table); + + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); + + /* wait to be finished */ + if (wait_alu_ready(dev, ALU_START, 1000) < 0) + dev_dbg(dev->dev, "Failed to read ALU\n"); + +exit: + mutex_unlock(&dev->alu_mutex); +} + +static int ksz_port_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct ksz_device *dev = ds->priv; + u32 alu_table[4]; + u32 data; + int ret = 0; + + mutex_lock(&dev->alu_mutex); + + /* read any entry with mac & vid */ + data = fdb->vid << ALU_FID_INDEX_S; + data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + ksz_write32(dev, REG_SW_ALU_INDEX_0, data); + + data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); + data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + ksz_write32(dev, REG_SW_ALU_INDEX_1, data); + + /* start read operation */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); + + /* wait to be finished */ + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read ALU\n"); + goto exit; + } + + ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]); + if (alu_table[0] & ALU_V_STATIC_VALID) { + ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]); + ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]); + ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]); + + /* clear forwarding port */ + alu_table[2] &= ~BIT(port); + + /* if there is no port to forward, clear table */ + if ((alu_table[2] & ALU_V_PORT_MAP) == 0) { + alu_table[0] = 0; + alu_table[1] = 0; + alu_table[2] = 0; + alu_table[3] = 0; + } + } else { + alu_table[0] = 0; + alu_table[1] = 0; + alu_table[2] = 0; + alu_table[3] = 0; + } + + write_table(ds, alu_table); + + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); + + /* wait to be finished */ + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to write ALU\n"); + +exit: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static void convert_alu(struct alu_struct *alu, u32 *alu_table) +{ + alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID); + alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER); + alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER); + alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) & + ALU_V_PRIO_AGE_CNT_M; + alu->mstp = alu_table[0] & ALU_V_MSTP_M; + + alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE); + alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID); + alu->port_forward = alu_table[1] & ALU_V_PORT_MAP; + + alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M; + + alu->mac[0] = (alu_table[2] >> 8) & 0xFF; + alu->mac[1] = alu_table[2] & 0xFF; + alu->mac[2] = (alu_table[3] >> 24) & 0xFF; + 
alu->mac[3] = (alu_table[3] >> 16) & 0xFF; + alu->mac[4] = (alu_table[3] >> 8) & 0xFF; + alu->mac[5] = alu_table[3] & 0xFF; +} + +static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + struct ksz_device *dev = ds->priv; + int ret = 0; + u32 data; + u32 alu_table[4]; + struct alu_struct alu; + int timeout; + + mutex_lock(&dev->alu_mutex); + + /* start ALU search */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH); + + do { + timeout = 1000; + do { + ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); + if ((data & ALU_VALID) || !(data & ALU_START)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (!timeout) { + dev_dbg(dev->dev, "Failed to search ALU\n"); + ret = -ETIMEDOUT; + goto exit; + } + + /* read ALU table */ + read_table(ds, alu_table); + + convert_alu(&alu, alu_table); + + if (alu.port_forward & BIT(port)) { + fdb->vid = alu.fid; + if (alu.is_static) + fdb->ndm_state = NUD_NOARP; + else + fdb->ndm_state = NUD_REACHABLE; + ether_addr_copy(fdb->addr, alu.mac); + + ret = cb(&fdb->obj); + if (ret) + goto exit; + } + } while (data & ALU_START); + +exit: + + /* stop ALU search */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, 0); + + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + /* nothing to do */ + return 0; +} + +static void ksz_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct ksz_device *dev = ds->priv; + u32 static_table[4]; + u32 data; + int index; + u32 mac_hi, mac_lo; + + mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]); + mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16)); + mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]); + + mutex_lock(&dev->alu_mutex); + + for (index = 0; index < dev->num_statics; index++) { + /* find empty slot first */ + data = (index << ALU_STAT_INDEX_S) | + ALU_STAT_READ | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) { + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + goto exit; + } + + /* read ALU static table */ + read_table(ds, static_table); + + if (static_table[0] & ALU_V_STATIC_VALID) { + /* check this has same vid & mac address */ + if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) && + ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) && + (static_table[3] == mac_lo)) { + /* found matching one */ + break; + } + } else { + /* found empty one */ + break; + } + } + + /* no available entry */ + if (index == dev->num_statics) + goto exit; + + /* add entry */ + static_table[0] = ALU_V_STATIC_VALID; + static_table[1] |= BIT(port); + if (mdb->vid) + static_table[1] |= ALU_V_USE_FID; + static_table[2] = (mdb->vid << ALU_V_FID_S); + static_table[2] |= mac_hi; + static_table[3] = mac_lo; + + write_table(ds, static_table); + + data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + +exit: + mutex_unlock(&dev->alu_mutex); +} + +static int ksz_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ksz_device *dev = ds->priv; + u32 static_table[4]; + u32 data; + int index; + int ret = 0; 
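+ /* Deletion mirrors ksz_port_mdb_add(): locate the static ALU entry whose FID and MAC match, clear this port's bit in its forwarding map, and wipe the entry entirely once no ports remain. */ 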
+ u32 mac_hi, mac_lo; + + mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]); + mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16)); + mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]); + + mutex_lock(&dev->alu_mutex); + + for (index = 0; index < dev->num_statics; index++) { + /* find empty slot first */ + data = (index << ALU_STAT_INDEX_S) | + ALU_STAT_READ | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + goto exit; + } + + /* read ALU static table */ + read_table(ds, static_table); + + if (static_table[0] & ALU_V_STATIC_VALID) { + /* check this has same vid & mac address */ + + if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) && + ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) && + (static_table[3] == mac_lo)) { + /* found matching one */ + break; + } + } + } + + /* no available entry */ + if (index == dev->num_statics) { + ret = -EINVAL; + goto exit; + } + + /* clear port */ + static_table[1] &= ~BIT(port); + + if ((static_table[1] & ALU_V_PORT_MAP) == 0) { + /* delete entry */ + static_table[0] = 0; + static_table[1] = 0; + static_table[2] = 0; + static_table[3] = 0; + } + + write_table(ds, static_table); + + data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + +exit: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz_port_mdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_mdb *mdb, + switchdev_obj_dump_cb_t *cb) +{ + /* this is not called by switch layer */ + return 0; +} + +static int ksz_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) +{ + struct ksz_device *dev = ds->priv; + + if (ingress) + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); + else + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); + + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); + + /* configure mirror port */ + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, true); + + ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + + return 0; +} + +static void ksz_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ksz_device *dev = ds->priv; + u8 data; + + if (mirror->ingress) + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); + else + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + + if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, false); +} + +static const struct dsa_switch_ops ksz_switch_ops = { + .get_tag_protocol = ksz_get_tag_protocol, + .setup = ksz_setup, + .phy_read = ksz_phy_read16, + .phy_write = ksz_phy_write16, + .port_enable = ksz_enable_port, + .port_disable = ksz_disable_port, + .get_strings = ksz_get_strings, + .get_ethtool_stats = ksz_get_ethtool_stats, + .get_sset_count = ksz_sset_count, + .port_stp_state_set = ksz_port_stp_state_set, + .port_fast_age = ksz_port_fast_age, + .port_vlan_filtering = ksz_port_vlan_filtering, + .port_vlan_prepare = ksz_port_vlan_prepare, + .port_vlan_add = ksz_port_vlan_add, + .port_vlan_del = ksz_port_vlan_del, + 
.port_vlan_dump = ksz_port_vlan_dump, + .port_fdb_prepare = ksz_port_fdb_prepare, + .port_fdb_dump = ksz_port_fdb_dump, + .port_fdb_add = ksz_port_fdb_add, + .port_fdb_del = ksz_port_fdb_del, + .port_mdb_prepare = ksz_port_mdb_prepare, + .port_mdb_add = ksz_port_mdb_add, + .port_mdb_del = ksz_port_mdb_del, + .port_mdb_dump = ksz_port_mdb_dump, + .port_mirror_add = ksz_port_mirror_add, + .port_mirror_del = ksz_port_mirror_del, +}; + +struct ksz_chip_data { + u32 chip_id; + const char *dev_name; + int num_vlans; + int num_alus; + int num_statics; + int cpu_ports; + int port_cnt; +}; + +static const struct ksz_chip_data ksz_switch_chips[] = { + { + .chip_id = 0x00947700, + .dev_name = "KSZ9477", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + }, +}; + +static int ksz_switch_init(struct ksz_device *dev) +{ + int i; + + mutex_init(&dev->reg_mutex); + mutex_init(&dev->stats_mutex); + mutex_init(&dev->alu_mutex); + mutex_init(&dev->vlan_mutex); + + dev->ds->ops = &ksz_switch_ops; + + for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz_switch_chips[i]; + + if (dev->chip_id == chip->chip_id) { + dev->name = chip->dev_name; + dev->num_vlans = chip->num_vlans; + dev->num_alus = chip->num_alus; + dev->num_statics = chip->num_statics; + dev->port_cnt = chip->port_cnt; + dev->cpu_ports = chip->cpu_ports; + + break; + } + } + + /* no switch found */ + if (!dev->port_cnt) + return -ENODEV; + + return 0; +} + +struct ksz_device *ksz_switch_alloc(struct device *base, + const struct ksz_io_ops *ops, + void *priv) +{ + struct dsa_switch *ds; + struct ksz_device *swdev; + + ds = dsa_switch_alloc(base, DSA_MAX_PORTS); + if (!ds) + return NULL; + + swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL); + if (!swdev) + return NULL; + + ds->priv = swdev; + swdev->dev = base; + + swdev->ds = ds; + swdev->priv = priv; + swdev->ops = ops; + + return swdev; +} +EXPORT_SYMBOL(ksz_switch_alloc); + +int ksz_switch_detect(struct ksz_device *dev) +{ + u8 data8; + u32 id32; + int ret; + + /* turn off SPI DO Edge select */ + ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); + if (ret) + return ret; + + data8 &= ~SPI_AUTO_EDGE_DETECTION; + ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); + if (ret) + return ret; + + /* read chip id */ + ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32); + if (ret) + return ret; + + dev->chip_id = id32; + + return 0; +} +EXPORT_SYMBOL(ksz_switch_detect); + +int ksz_switch_register(struct ksz_device *dev) +{ + int ret; + + if (dev->pdata) + dev->chip_id = dev->pdata->chip_id; + + if (ksz_switch_detect(dev)) + return -EINVAL; + + ret = ksz_switch_init(dev); + if (ret) + return ret; + + return dsa_register_switch(dev->ds); +} +EXPORT_SYMBOL(ksz_switch_register); + +void ksz_switch_remove(struct ksz_device *dev) +{ + dsa_unregister_switch(dev->ds); +} +EXPORT_SYMBOL(ksz_switch_remove); + +MODULE_AUTHOR("Woojung Huh "); +MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h new file mode 100644 index 000000000000..2a98dbd51456 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_priv.h @@ -0,0 +1,210 @@ +/* + * Microchip KSZ series switch common definitions + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby 
granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __KSZ_PRIV_H +#define __KSZ_PRIV_H + +#include +#include +#include +#include +#include + +#include "ksz_9477_reg.h" + +struct ksz_io_ops; + +struct vlan_table { + u32 table[3]; +}; + +struct ksz_device { + struct dsa_switch *ds; + struct ksz_platform_data *pdata; + const char *name; + + struct mutex reg_mutex; /* register access */ + struct mutex stats_mutex; /* status access */ + struct mutex alu_mutex; /* ALU access */ + struct mutex vlan_mutex; /* vlan access */ + const struct ksz_io_ops *ops; + + struct device *dev; + + void *priv; + + /* chip specific data */ + u32 chip_id; + int num_vlans; + int num_alus; + int num_statics; + int cpu_port; /* port connected to CPU */ + int cpu_ports; /* port bitmap can be cpu port */ + int port_cnt; + + struct vlan_table *vlan_cache; + + u64 mib_value[TOTAL_SWITCH_COUNTER_NUM]; +}; + +struct ksz_io_ops { + int (*read8)(struct ksz_device *dev, u32 reg, u8 *value); + int (*read16)(struct ksz_device *dev, u32 reg, u16 *value); + int (*read24)(struct ksz_device *dev, u32 reg, u32 *value); + int (*read32)(struct ksz_device *dev, u32 reg, u32 *value); + int (*write8)(struct ksz_device *dev, u32 reg, u8 value); + int (*write16)(struct ksz_device *dev, u32 reg, u16 value); + int (*write24)(struct ksz_device *dev, u32 reg, u32 value); + int (*write32)(struct ksz_device *dev, u32 reg, u32 value); + int (*phy_read16)(struct ksz_device *dev, int addr, int reg, + u16 *value); + int (*phy_write16)(struct ksz_device *dev, int addr, int reg, + u16 value); +}; + +struct ksz_device *ksz_switch_alloc(struct device *base, + const struct ksz_io_ops *ops, void *priv); +int ksz_switch_detect(struct ksz_device *dev); +int ksz_switch_register(struct ksz_device *dev); +void ksz_switch_remove(struct ksz_device *dev); + +static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read8(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read16(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read24(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read32(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write8(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value) +{ + 
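/* Every ksz_read and ksz_write wrapper serializes its raw bus access through reg_mutex; multi-register sequences such as the VLAN and ALU table updates additionally take the higher-level vlan_mutex or alu_mutex. */ + 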
int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write16(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write24(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write32(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline void ksz_pread8(struct ksz_device *dev, int port, int offset, + u8 *data) +{ + ksz_read8(dev, PORT_CTRL_ADDR(port, offset), data); +} + +static inline void ksz_pread16(struct ksz_device *dev, int port, int offset, + u16 *data) +{ + ksz_read16(dev, PORT_CTRL_ADDR(port, offset), data); +} + +static inline void ksz_pread32(struct ksz_device *dev, int port, int offset, + u32 *data) +{ + ksz_read32(dev, PORT_CTRL_ADDR(port, offset), data); +} + +static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset, + u8 data) +{ + ksz_write8(dev, PORT_CTRL_ADDR(port, offset), data); +} + +static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset, + u16 data) +{ + ksz_write16(dev, PORT_CTRL_ADDR(port, offset), data); +} + +static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, + u32 data) +{ + ksz_write32(dev, PORT_CTRL_ADDR(port, offset), data); +} + +#endif diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c new file mode 100644 index 000000000000..c51946983bed --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -0,0 +1,216 @@ +/* + * Microchip KSZ series register access through SPI + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include +#include +#include +#include + +#include "ksz_priv.h" + +/* SPI frame opcodes */ +#define KS_SPIOP_RD 3 +#define KS_SPIOP_WR 2 + +#define SPI_ADDR_SHIFT 24 +#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1) +#define SPI_TURNAROUND_SHIFT 5 + +static int ksz_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val, + unsigned int len) +{ + u32 txbuf; + int ret; + + txbuf = reg & SPI_ADDR_MASK; + txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT; + txbuf <<= SPI_TURNAROUND_SHIFT; + txbuf = cpu_to_be32(txbuf); + + ret = spi_write_then_read(spi, &txbuf, 4, val, len); + return ret; +} + +static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data, + unsigned int len) +{ + struct spi_device *spi = dev->priv; + + return ksz_spi_read_reg(spi, reg, data, len); +} + +static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val) +{ + return ksz_spi_read(dev, reg, val, 1); +} + +static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val) +{ + int ret = ksz_spi_read(dev, reg, (u8 *)val, 2); + + if (!ret) + *val = be16_to_cpu(*val); + + return ret; +} + +static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret; + + *val = 0; + ret = ksz_spi_read(dev, reg, (u8 *)val, 3); + if (!ret) { + *val = be32_to_cpu(*val); + /* convert to 24bit */ + *val >>= 8; + } + + return ret; +} + +static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret = ksz_spi_read(dev, reg, (u8 *)val, 4); + + if (!ret) + *val = be32_to_cpu(*val); + + return ret; +} + +static int ksz_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val, + unsigned int len) +{ + u32 txbuf; + u8 data[12]; + int i; + + txbuf = reg & SPI_ADDR_MASK; + txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT); + txbuf <<= SPI_TURNAROUND_SHIFT; + txbuf = cpu_to_be32(txbuf); + + data[0] = txbuf & 0xFF; + data[1] = (txbuf & 0xFF00) >> 8; + data[2] = (txbuf & 0xFF0000) >> 16; + data[3] = (txbuf & 0xFF000000) >> 24; + for (i = 0; i < len; i++) + data[i + 4] = val[i]; + + return spi_write(spi, &data, 4 + len); +} + +static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value) +{ + struct spi_device *spi = dev->priv; + + return ksz_spi_write_reg(spi, reg, &value, 1); +} + +static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value) +{ + struct spi_device *spi = dev->priv; + + value = cpu_to_be16(value); + return ksz_spi_write_reg(spi, reg, (u8 *)&value, 2); +} + +static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value) +{ + struct spi_device *spi = dev->priv; + + /* make it to big endian 24bit from MSB */ + value <<= 8; + value = cpu_to_be32(value); + return ksz_spi_write_reg(spi, reg, (u8 *)&value, 3); +} + +static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value) +{ + struct spi_device *spi = dev->priv; + + value = cpu_to_be32(value); + return ksz_spi_write_reg(spi, reg, (u8 *)&value, 4); +} + +static const struct ksz_io_ops ksz_spi_ops = { + .read8 = ksz_spi_read8, + .read16 = ksz_spi_read16, + .read24 = ksz_spi_read24, + .read32 = ksz_spi_read32, + .write8 = ksz_spi_write8, + .write16 = ksz_spi_write16, + .write24 = ksz_spi_write24, + .write32 = ksz_spi_write32, +}; + +static int ksz_spi_probe(struct spi_device *spi) +{ + struct ksz_device *dev; + int ret; + + dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi); + if (!dev) + return -ENOMEM; + + if (spi->dev.platform_data) + dev->pdata = spi->dev.platform_data; + + ret = ksz_switch_register(dev); + if (ret) + return ret; + + spi_set_drvdata(spi, dev); + + return 0; +} + +static int 
ksz_spi_remove(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev) + ksz_switch_remove(dev); + + return 0; +} + +static const struct of_device_id ksz_dt_ids[] = { + { .compatible = "microchip,ksz9477" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ksz_dt_ids); + +static struct spi_driver ksz_spi_driver = { + .driver = { + .name = "ksz9477-switch", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ksz_dt_ids), + }, + .probe = ksz_spi_probe, + .remove = ksz_spi_remove, +}; + +module_spi_driver(ksz_spi_driver); + +MODULE_AUTHOR("Woojung Huh "); +MODULE_DESCRIPTION("Microchip KSZ Series Switch SPI access Driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h new file mode 100644 index 000000000000..84789ca634aa --- /dev/null +++ b/include/linux/platform_data/microchip-ksz.h @@ -0,0 +1,29 @@ +/* + * Microchip KSZ series switch platform data + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MICROCHIP_KSZ_H +#define __MICROCHIP_KSZ_H + +#include + +struct ksz_platform_data { + u32 chip_id; + u16 enabled_ports; +}; + +#endif -- cgit v1.2.3 From 726fdbe9fa7ebccda1579716f68f8bae6fa9c87a Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Thu, 1 Jun 2017 15:29:06 +0300 Subject: qed: Encapsulate interrupt counters in struct We already have an API struct that contains interrupt-related numbers. Use it to encapsulate all information relating to the status of SBs as (used|free). Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 4 ++-- drivers/net/ethernet/qlogic/qed/qed_int.c | 20 +++++++++----------- drivers/net/ethernet/qlogic/qed/qed_int.h | 10 +++++----- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 9 ++++----- include/linux/qed/qed_if.h | 12 +++++++++--- 6 files changed, 30 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3b6114d4461a..1fff0473ddbb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2061,7 +2061,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); feat_num[QED_VF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_L2_QUEUE), - sb_cnt_info.sb_iov_cnt); + sb_cnt_info.iov_cnt); feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) - non_l2_sbs, @@ -2255,7 +2255,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, case QED_SB: memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); - *p_resc_num = sb_cnt_info.sb_cnt; + *p_resc_num = sb_cnt_info.cnt; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 3307978f6eeb..c9164e27a1b2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1769,7 +1769,7 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, bool b_set, bool b_slowpath) { u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; - u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; + u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->usage.cnt; u32 igu_sb_id = 0, val = 0; val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); @@ -1827,7 +1827,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Initialize base sb / sb cnt for PFs and VFs */ p_igu_info->igu_base_sb = 0xffff; - p_igu_info->igu_sb_cnt = 0; p_igu_info->igu_base_sb_iov = 0xffff; /* Distinguish between existent and non-existent default SB */ @@ -1856,7 +1855,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) { if (p_igu_info->igu_base_sb == 0xffff) p_igu_info->igu_base_sb = igu_sb_id; - p_igu_info->igu_sb_cnt++; + p_igu_info->usage.cnt++; } } else if (!(p_block->is_pf) && (p_block->function_id >= min_vf) && @@ -1867,7 +1866,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (p_igu_info->igu_base_sb_iov == 0xffff) p_igu_info->igu_base_sb_iov = igu_sb_id; - p_igu_info->free_blks++; + p_igu_info->usage.iov_cnt++; } /* Mark the First entry belonging to the PF or its VFs @@ -1900,12 +1899,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } /* All non default SB are considered free at this point */ - p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; + p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; + p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n", p_igu_info->igu_dsb_id, - p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov); + p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); return 0; } @@ -2003,9 +2003,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, if (!info || !p_sb_cnt_info) return; - p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; - p_sb_cnt_info->sb_iov_cnt = 
info->igu_sb_cnt_iov; - p_sb_cnt_info->sb_free_blk = info->free_blks; + memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); } u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) @@ -2014,10 +2012,10 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) /* Determine origin of SB id */ if ((sb_id >= p_info->igu_base_sb) && - (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { + (sb_id < p_info->igu_base_sb + p_info->usage.cnt)) { return sb_id - p_info->igu_base_sb; } else if ((sb_id >= p_info->igu_base_sb_iov) && - (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { + (sb_id < p_info->igu_base_sb_iov + p_info->usage.iov_cnt)) { /* We want the first VF queue to be adjacent to the * last PF queue. Since L2 queues can be partial to * SBs, we'll use the feature instead. diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 60aaf9f9bb78..5a0e8f02c969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -216,11 +216,11 @@ struct qed_igu_block { struct qed_igu_info { struct qed_igu_block entry[MAX_TOT_SB_PER_PATH]; u16 igu_dsb_id; - u16 igu_base_sb; - u16 igu_base_sb_iov; - u16 igu_sb_cnt; - u16 igu_sb_cnt_iov; - u16 free_blks; + + u16 igu_base_sb; + u16 igu_base_sb_iov; + struct qed_sb_cnt_info usage; + }; /* TODO Names of function may change... */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c5bb80b9afc1..ac3bdcd9f0b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); - cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; + cdev->int_params.in.num_vectors += sb_cnt_info.cnt; cdev->int_params.in.num_vectors++; /* slowpath */ } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 0827a4187dc7..62b207a80a03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -874,9 +874,9 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, igu_blocks = p_hwfn->hw_info.p_igu_info->entry; - if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) - num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; - p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) + num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); @@ -932,8 +932,7 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, addr, val); p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; - - p_hwfn->hw_info.p_igu_info->free_blks++; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; } vf->num_sbs = 0; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 73c46d6d5727..607e1c5e185a 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -886,9 +886,15 @@ struct qed_eth_stats { #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { - int sb_cnt; - int sb_iov_cnt; - int sb_free_blk; + /* Original, current, and free SBs for PF */ + int orig; + 
int cnt; + int free_cnt; + + /* Original, current, and free SBs for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) -- cgit v1.2.3 From ec33d71de7309c50531c2ae0eb178244899e6e46 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 31 May 2017 09:18:33 +0200 Subject: net-next: stmmac: add optional setup function Instead of adding more if/then logic for adding a new mac_device_info setup function, it is easier to add a function pointer to the function needed. Signed-off-by: Corentin Labbe Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 +++- include/linux/stmmac.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f158273eab9b..c80c9c3b67db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3933,7 +3933,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) struct mac_device_info *mac; /* Identify the MAC HW device */ - if (priv->plat->has_gmac) { + if (priv->plat->setup) { + mac = priv->plat->setup(priv); + } else if (priv->plat->has_gmac) { priv->dev->priv_flags |= IFF_UNICAST_FLT; mac = dwmac1000_setup(priv->ioaddr, priv->plat->multicast_filter_bins, diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 3921cb9dfadb..8bb550bca96d 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -177,6 +177,7 @@ struct plat_stmmacenet_data { void (*fix_mac_speed)(void *priv, unsigned int speed); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); + struct mac_device_info *(*setup)(void *priv); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; -- cgit v1.2.3
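The setup() hook above lets a glue layer hand stmmac a fully constructed mac_device_info instead of growing the detection chain in stmmac_hw_init(). A minimal sketch of how a glue driver would use it (the foo_* names are hypothetical; the real pattern appears in dwmac-sun8i in the next commit):

static struct mac_device_info *foo_dwmac_setup(void *ppriv)
{
	struct stmmac_priv *priv = ppriv;	/* stmmac passes its private state */
	struct mac_device_info *mac;

	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return NULL;

	mac->pcsr = priv->ioaddr;	/* register base used by the ops */
	mac->mac = &foo_dwmac_ops;	/* glue-specific struct stmmac_ops */
	mac->dma = &foo_dwmac_dma_ops;	/* glue-specific struct stmmac_dma_ops */
	return mac;
}

/* In the glue driver's probe(), before calling stmmac_dvr_probe(): */
plat_dat->setup = foo_dwmac_setup;

Since stmmac_hw_init() tests priv->plat->setup first, a driver that sets it bypasses the dwmac1000/dwmac4 auto-detection entirely.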
From 9f93ac8d4085f718d3c7c5fedcb98dbdd2287648 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 31 May 2017 09:18:36 +0200 Subject: net-next: stmmac: Add dwmac-sun8i The dwmac-sun8i is a heavily hacked version of the stmmac hardware by Allwinner. In fact the only common part is the descriptor management and the first register function. Signed-off-by: Corentin Labbe Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 11 + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 990 +++++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 15 + .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 9 +- include/linux/stmmac.h | 1 + 6 files changed, 1025 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c (limited to 'include') diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index cfbe3634dfa1..85c0e41f8021 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -145,6 +145,17 @@ config DWMAC_SUNXI This selects Allwinner SoC glue layer support for the stmmac device driver. This driver is used for A20/A31 GMAC ethernet controller. + +config DWMAC_SUN8I + tristate "Allwinner sun8i GMAC support" + default ARCH_SUNXI + depends on OF && (ARCH_SUNXI || COMPILE_TEST) + ---help--- + Support for the Allwinner H3, A83T and A64 EMAC ethernet controllers. + + This selects Allwinner SoC glue layer support for the + stmmac device driver. This driver is used for the H3/A83T/A64 + EMAC ethernet controller. endif config STMMAC_PCI diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 700c60336674..fd4937a7fcab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o +obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c new file mode 100644 index 000000000000..1a6bfe6c958f --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -0,0 +1,990 @@ +/* + * dwmac-sun8i.c - Allwinner sun8i DWMAC specific glue layer + * + * Copyright (C) 2017 Corentin Labbe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +/* General notes on dwmac-sun8i: + * Locking: no locking is necessary in this file because all necessary locking + * is done in the "stmmac files" + */ + +/* struct emac_variant - Describe dwmac-sun8i hardware variant + * @default_syscon_value: The default value of the EMAC register in syscon. + * This value is used to properly disable the EMAC, + * and serves as a good starting value in case the + * boot process (U-Boot) left some state behind.
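 + * (On the H3, for instance, this default is 0x58000, which leaves + * the internal PHY selected via H3_EPHY_SELECT but held in shutdown + * via H3_EPHY_SHUTDOWN; see emac_variant_h3 and the H3_EPHY_* bits + * below.)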
+ * @internal_phy: Does the MAC embed an internal PHY + * @support_mii: Does the MAC handle MII + * @support_rmii: Does the MAC handle RMII + * @support_rgmii: Does the MAC handle RGMII + */ +struct emac_variant { + u32 default_syscon_value; + int internal_phy; + bool support_mii; + bool support_rmii; + bool support_rgmii; +}; + +/* struct sunxi_priv_data - hold all sunxi private data + * @tx_clk: reference to MAC TX clock + * @ephy_clk: reference to the optional EPHY clock for the internal PHY + * @regulator: reference to the optional regulator + * @rst_ephy: reference to the optional EPHY reset for the internal PHY + * @variant: reference to the current board variant + * @regmap: regmap for using the syscon + * @use_internal_phy: Does the current PHY choice imply using the internal PHY + */ +struct sunxi_priv_data { + struct clk *tx_clk; + struct clk *ephy_clk; + struct regulator *regulator; + struct reset_control *rst_ephy; + const struct emac_variant *variant; + struct regmap *regmap; + bool use_internal_phy; +}; + +static const struct emac_variant emac_variant_h3 = { + .default_syscon_value = 0x58000, + .internal_phy = PHY_INTERFACE_MODE_MII, + .support_mii = true, + .support_rmii = true, + .support_rgmii = true +}; + +static const struct emac_variant emac_variant_a83t = { + .default_syscon_value = 0, + .internal_phy = 0, + .support_mii = true, + .support_rgmii = true +}; + +static const struct emac_variant emac_variant_a64 = { + .default_syscon_value = 0, + .internal_phy = 0, + .support_mii = true, + .support_rmii = true, + .support_rgmii = true +}; + +#define EMAC_BASIC_CTL0 0x00 +#define EMAC_BASIC_CTL1 0x04 +#define EMAC_INT_STA 0x08 +#define EMAC_INT_EN 0x0C +#define EMAC_TX_CTL0 0x10 +#define EMAC_TX_CTL1 0x14 +#define EMAC_TX_FLOW_CTL 0x1C +#define EMAC_TX_DESC_LIST 0x20 +#define EMAC_RX_CTL0 0x24 +#define EMAC_RX_CTL1 0x28 +#define EMAC_RX_DESC_LIST 0x34 +#define EMAC_RX_FRM_FLT 0x38 +#define EMAC_MDIO_CMD 0x48 +#define EMAC_MDIO_DATA 0x4C +#define EMAC_MACADDR_HI(reg) (0x50 + (reg) * 8) +#define EMAC_MACADDR_LO(reg) (0x54 + (reg) * 8) +#define EMAC_TX_DMA_STA 0xB0 +#define EMAC_TX_CUR_DESC 0xB4 +#define EMAC_TX_CUR_BUF 0xB8 +#define EMAC_RX_DMA_STA 0xC0 +#define EMAC_RX_CUR_DESC 0xC4 +#define EMAC_RX_CUR_BUF 0xC8 + +/* Use in EMAC_BASIC_CTL0 */ +#define EMAC_DUPLEX_FULL BIT(0) +#define EMAC_LOOPBACK BIT(1) +#define EMAC_SPEED_1000 0 +#define EMAC_SPEED_100 (0x03 << 2) +#define EMAC_SPEED_10 (0x02 << 2) + +/* Use in EMAC_BASIC_CTL1 */ +#define EMAC_BURSTLEN_SHIFT 24 + +/* Used in EMAC_RX_FRM_FLT */ +#define EMAC_FRM_FLT_RXALL BIT(0) +#define EMAC_FRM_FLT_CTL BIT(13) +#define EMAC_FRM_FLT_MULTICAST BIT(16) + +/* Used in RX_CTL1*/ +#define EMAC_RX_MD BIT(1) +#define EMAC_RX_TH_MASK GENMASK(4, 5) +#define EMAC_RX_TH_32 0 +#define EMAC_RX_TH_64 (0x1 << 4) +#define EMAC_RX_TH_96 (0x2 << 4) +#define EMAC_RX_TH_128 (0x3 << 4) +#define EMAC_RX_DMA_EN BIT(30) +#define EMAC_RX_DMA_START BIT(31) + +/* Used in TX_CTL1*/ +#define EMAC_TX_MD BIT(1) +#define EMAC_TX_NEXT_FRM BIT(2) +#define EMAC_TX_TH_MASK GENMASK(8, 10) +#define EMAC_TX_TH_64 0 +#define EMAC_TX_TH_128 (0x1 << 8) +#define EMAC_TX_TH_192 (0x2 << 8) +#define EMAC_TX_TH_256 (0x3 << 8) +#define EMAC_TX_DMA_EN BIT(30) +#define EMAC_TX_DMA_START BIT(31) + +/* Used in RX_CTL0 */ +#define EMAC_RX_RECEIVER_EN BIT(31) +#define EMAC_RX_DO_CRC BIT(27) +#define EMAC_RX_FLOW_CTL_EN BIT(16) + +/* Used in TX_CTL0 */ +#define EMAC_TX_TRANSMITTER_EN BIT(31) + +/* Used in EMAC_TX_FLOW_CTL */ +#define EMAC_TX_FLOW_CTL_EN BIT(0) + +/* Used in 
EMAC_INT_STA */ +#define EMAC_TX_INT BIT(0) +#define EMAC_TX_DMA_STOP_INT BIT(1) +#define EMAC_TX_BUF_UA_INT BIT(2) +#define EMAC_TX_TIMEOUT_INT BIT(3) +#define EMAC_TX_UNDERFLOW_INT BIT(4) +#define EMAC_TX_EARLY_INT BIT(5) +#define EMAC_RX_INT BIT(8) +#define EMAC_RX_BUF_UA_INT BIT(9) +#define EMAC_RX_DMA_STOP_INT BIT(10) +#define EMAC_RX_TIMEOUT_INT BIT(11) +#define EMAC_RX_OVERFLOW_INT BIT(12) +#define EMAC_RX_EARLY_INT BIT(13) +#define EMAC_RGMII_STA_INT BIT(16) + +#define MAC_ADDR_TYPE_DST BIT(31) + +/* H3 specific bits for EPHY */ +#define H3_EPHY_ADDR_SHIFT 20 +#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */ +#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */ +#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */ + +/* H3/A64 specific bits */ +#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */ + +/* Generic system control EMAC_CLK bits */ +#define SYSCON_ETXDC_MASK GENMASK(2, 0) +#define SYSCON_ETXDC_SHIFT 10 +#define SYSCON_ERXDC_MASK GENMASK(4, 0) +#define SYSCON_ERXDC_SHIFT 5 +/* EMAC PHY Interface Type */ +#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */ +#define SYSCON_ETCS_MASK GENMASK(1, 0) +#define SYSCON_ETCS_MII 0x0 +#define SYSCON_ETCS_EXT_GMII 0x1 +#define SYSCON_ETCS_INT_GMII 0x2 +#define SYSCON_EMAC_REG 0x30 + +/* sun8i_dwmac_dma_reset() - reset the EMAC + * Called from stmmac via stmmac_dma_ops->reset + */ +static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) +{ + writel(0, ioaddr + EMAC_RX_CTL1); + writel(0, ioaddr + EMAC_TX_CTL1); + writel(0, ioaddr + EMAC_RX_FRM_FLT); + writel(0, ioaddr + EMAC_RX_DESC_LIST); + writel(0, ioaddr + EMAC_TX_DESC_LIST); + writel(0, ioaddr + EMAC_INT_EN); + writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); + return 0; +} + +/* sun8i_dwmac_dma_init() - initialize the EMAC + * Called from stmmac via stmmac_dma_ops->init + */ +static void sun8i_dwmac_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx, u32 dma_rx, int atds) +{ + /* Write TX and RX descriptors address */ + writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST); + writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST); + + writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); + writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); +} + +/* sun8i_dwmac_dump_regs() - Dump EMAC address space + * Called from stmmac_dma_ops->dump_regs + * Used for ethtool + */ +static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < 0xC8; i += 4) { + if (i == 0x32 || i == 0x3C) + continue; + reg_space[i / 4] = readl(ioaddr + i); + } +} + +/* sun8i_dwmac_dump_mac_regs() - Dump EMAC address space + * Called from stmmac_ops->dump_regs + * Used for ethtool + */ +static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw, + u32 *reg_space) +{ + int i; + void __iomem *ioaddr = hw->pcsr; + + for (i = 0; i < 0xC8; i += 4) { + if (i == 0x32 || i == 0x3C) + continue; + reg_space[i / 4] = readl(ioaddr + i); + } +} + +static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan) +{ + writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); +} + +static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan) +{ + writel(0, ioaddr + EMAC_INT_EN); +} + +static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 v; + + v = readl(ioaddr + EMAC_TX_CTL1); + v |= EMAC_TX_DMA_START; + v |= EMAC_TX_DMA_EN; + writel(v, ioaddr + EMAC_TX_CTL1); +} + +static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr) +{ + u32 v; + + v = readl(ioaddr + EMAC_TX_CTL1); + v |= 
EMAC_TX_DMA_START; + v |= EMAC_TX_DMA_EN; + writel(v, ioaddr + EMAC_TX_CTL1); +} + +static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 v; + + v = readl(ioaddr + EMAC_TX_CTL1); + v &= ~EMAC_TX_DMA_EN; + writel(v, ioaddr + EMAC_TX_CTL1); +} + +static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 v; + + v = readl(ioaddr + EMAC_RX_CTL1); + v |= EMAC_RX_DMA_START; + v |= EMAC_RX_DMA_EN; + writel(v, ioaddr + EMAC_RX_CTL1); +} + +static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 v; + + v = readl(ioaddr + EMAC_RX_CTL1); + v &= ~EMAC_RX_DMA_EN; + writel(v, ioaddr + EMAC_RX_CTL1); +} + +static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan) +{ + u32 v; + int ret = 0; + + v = readl(ioaddr + EMAC_INT_STA); + + if (v & EMAC_TX_INT) { + ret |= handle_tx; + x->tx_normal_irq_n++; + } + + if (v & EMAC_TX_DMA_STOP_INT) + x->tx_process_stopped_irq++; + + if (v & EMAC_TX_BUF_UA_INT) + x->tx_process_stopped_irq++; + + if (v & EMAC_TX_TIMEOUT_INT) + ret |= tx_hard_error; + + if (v & EMAC_TX_UNDERFLOW_INT) { + ret |= tx_hard_error; + x->tx_undeflow_irq++; + } + + if (v & EMAC_TX_EARLY_INT) + x->tx_early_irq++; + + if (v & EMAC_RX_INT) { + ret |= handle_rx; + x->rx_normal_irq_n++; + } + + if (v & EMAC_RX_BUF_UA_INT) + x->rx_buf_unav_irq++; + + if (v & EMAC_RX_DMA_STOP_INT) + x->rx_process_stopped_irq++; + + if (v & EMAC_RX_TIMEOUT_INT) + ret |= tx_hard_error; + + if (v & EMAC_RX_OVERFLOW_INT) { + ret |= tx_hard_error; + x->rx_overflow_irq++; + } + + if (v & EMAC_RX_EARLY_INT) + x->rx_early_irq++; + + if (v & EMAC_RGMII_STA_INT) + x->irq_rgmii_n++; + + writel(v, ioaddr + EMAC_INT_STA); + + return ret; +} + +static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode, + int rxmode, int rxfifosz) +{ + u32 v; + + v = readl(ioaddr + EMAC_TX_CTL1); + if (txmode == SF_DMA_MODE) { + v |= EMAC_TX_MD; + /* Undocumented bit (called TX_NEXT_FRM in BSP), the original + * comment is + * "Operating on second frame increase the performance + * especially when transmit store-and-forward is used." 
+ */ + v |= EMAC_TX_NEXT_FRM; + } else { + v &= ~EMAC_TX_MD; + v &= ~EMAC_TX_TH_MASK; + if (txmode < 64) + v |= EMAC_TX_TH_64; + else if (txmode < 128) + v |= EMAC_TX_TH_128; + else if (txmode < 192) + v |= EMAC_TX_TH_192; + else if (txmode < 256) + v |= EMAC_TX_TH_256; + } + writel(v, ioaddr + EMAC_TX_CTL1); + + v = readl(ioaddr + EMAC_RX_CTL1); + if (rxmode == SF_DMA_MODE) { + v |= EMAC_RX_MD; + } else { + v &= ~EMAC_RX_MD; + v &= ~EMAC_RX_TH_MASK; + if (rxmode < 32) + v |= EMAC_RX_TH_32; + else if (rxmode < 64) + v |= EMAC_RX_TH_64; + else if (rxmode < 96) + v |= EMAC_RX_TH_96; + else if (rxmode < 128) + v |= EMAC_RX_TH_128; + } + writel(v, ioaddr + EMAC_RX_CTL1); +} + +static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { + .reset = sun8i_dwmac_dma_reset, + .init = sun8i_dwmac_dma_init, + .dump_regs = sun8i_dwmac_dump_regs, + .dma_mode = sun8i_dwmac_dma_operation_mode, + .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission, + .enable_dma_irq = sun8i_dwmac_enable_dma_irq, + .disable_dma_irq = sun8i_dwmac_disable_dma_irq, + .start_tx = sun8i_dwmac_dma_start_tx, + .stop_tx = sun8i_dwmac_dma_stop_tx, + .start_rx = sun8i_dwmac_dma_start_rx, + .stop_rx = sun8i_dwmac_dma_stop_rx, + .dma_interrupt = sun8i_dwmac_dma_interrupt, +}; + +static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct sunxi_priv_data *gmac = priv; + int ret; + + if (gmac->regulator) { + ret = regulator_enable(gmac->regulator); + if (ret) { + dev_err(&pdev->dev, "Fail to enable regulator\n"); + return ret; + } + } + + ret = clk_prepare_enable(gmac->tx_clk); + if (ret) { + if (gmac->regulator) + regulator_disable(gmac->regulator); + dev_err(&pdev->dev, "Could not enable AHB clock\n"); + return ret; + } + + return 0; +} + +static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu) +{ + void __iomem *ioaddr = hw->pcsr; + u32 v; + + v = (8 << EMAC_BURSTLEN_SHIFT); /* burst len */ + writel(v, ioaddr + EMAC_BASIC_CTL1); +} + +static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 t, r; + + t = readl(ioaddr + EMAC_TX_CTL0); + r = readl(ioaddr + EMAC_RX_CTL0); + if (enable) { + t |= EMAC_TX_TRANSMITTER_EN; + r |= EMAC_RX_RECEIVER_EN; + } else { + t &= ~EMAC_TX_TRANSMITTER_EN; + r &= ~EMAC_RX_RECEIVER_EN; + } + writel(t, ioaddr + EMAC_TX_CTL0); + writel(r, ioaddr + EMAC_RX_CTL0); +} + +/* Set MAC address at slot reg_n + * All slot > 0 need to be enabled with MAC_ADDR_TYPE_DST + * If addr is NULL, clear the slot + */ +static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 v; + + if (!addr) { + writel(0, ioaddr + EMAC_MACADDR_HI(reg_n)); + return; + } + + stmmac_set_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n), + EMAC_MACADDR_LO(reg_n)); + if (reg_n > 0) { + v = readl(ioaddr + EMAC_MACADDR_HI(reg_n)); + v |= MAC_ADDR_TYPE_DST; + writel(v, ioaddr + EMAC_MACADDR_HI(reg_n)); + } +} + +static void sun8i_dwmac_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_get_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n), + EMAC_MACADDR_LO(reg_n)); +} + +/* caution this function must return non 0 to work */ +static int sun8i_dwmac_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 v; + + v = readl(ioaddr + EMAC_RX_CTL0); + v |= EMAC_RX_DO_CRC; + writel(v, ioaddr + EMAC_RX_CTL0); + + return 1; +} + +static void sun8i_dwmac_set_filter(struct mac_device_info *hw, + 
struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + u32 v; + int i = 1; + struct netdev_hw_addr *ha; + int macaddrs = netdev_uc_count(dev) + netdev_mc_count(dev) + 1; + + v = EMAC_FRM_FLT_CTL; + + if (dev->flags & IFF_PROMISC) { + v = EMAC_FRM_FLT_RXALL; + } else if (dev->flags & IFF_ALLMULTI) { + v |= EMAC_FRM_FLT_MULTICAST; + } else if (macaddrs <= hw->unicast_filter_entries) { + if (!netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(ha, dev) { + sun8i_dwmac_set_umac_addr(hw, ha->addr, i); + i++; + } + } + if (!netdev_uc_empty(dev)) { + netdev_for_each_uc_addr(ha, dev) { + sun8i_dwmac_set_umac_addr(hw, ha->addr, i); + i++; + } + } + } else { + netdev_info(dev, "Too many addresses, switching to promiscuous\n"); + v = EMAC_FRM_FLT_RXALL; + } + + /* Disable unused address filter slots */ + while (i < hw->unicast_filter_entries) + sun8i_dwmac_set_umac_addr(hw, NULL, i++); + + writel(v, ioaddr + EMAC_RX_FRM_FLT); +} + +static void sun8i_dwmac_flow_ctrl(struct mac_device_info *hw, + unsigned int duplex, unsigned int fc, + unsigned int pause_time, u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + u32 v; + + v = readl(ioaddr + EMAC_RX_CTL0); + if (fc == FLOW_AUTO) + v |= EMAC_RX_FLOW_CTL_EN; + else + v &= ~EMAC_RX_FLOW_CTL_EN; + writel(v, ioaddr + EMAC_RX_CTL0); + + v = readl(ioaddr + EMAC_TX_FLOW_CTL); + if (fc == FLOW_AUTO) + v |= EMAC_TX_FLOW_CTL_EN; + else + v &= ~EMAC_TX_FLOW_CTL_EN; + writel(v, ioaddr + EMAC_TX_FLOW_CTL); +} + +static int sun8i_dwmac_reset(struct stmmac_priv *priv) +{ + u32 v; + int err; + + v = readl(priv->ioaddr + EMAC_BASIC_CTL1); + writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1); + + /* The timeout was previously set to 10ms, but some boards (OrangePI0) + * need more when no cable is plugged in. 100ms seems OK. + */ + err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v, + !(v & 0x01), 100, 100000); + + if (err) { + dev_err(priv->device, "EMAC reset timeout\n"); + return -EFAULT; + } + return 0; +} + +static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) +{ + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + struct device_node *node = priv->device->of_node; + int ret; + u32 reg, val; + + regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val); + reg = gmac->variant->default_syscon_value; + if (reg != val) + dev_warn(priv->device, + "Current syscon value is not the default %x (expect %x)\n", + val, reg); + + if (gmac->variant->internal_phy) { + if (!gmac->use_internal_phy) { + /* switch to external PHY interface */ + reg &= ~H3_EPHY_SELECT; + } else { + reg |= H3_EPHY_SELECT; + reg &= ~H3_EPHY_SHUTDOWN; + dev_dbg(priv->device, "Select internal_phy %x\n", reg); + + if (of_property_read_bool(priv->plat->phy_node, + "allwinner,leds-active-low")) + reg |= H3_EPHY_LED_POL; + else + reg &= ~H3_EPHY_LED_POL; + + ret = of_mdio_parse_addr(priv->device, + priv->plat->phy_node); + if (ret < 0) { + dev_err(priv->device, "Could not parse MDIO addr\n"); + return ret; + } + /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY + * address. No need to mask it again. + */
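 + /* For example, with the internal PHY at MDIO address 1 (typical on + * H3 boards), the statement below folds that address into bits 24:20 + * of the syscon word, i.e. reg |= 1 << H3_EPHY_ADDR_SHIFT. + */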
 + reg |= ret << H3_EPHY_ADDR_SHIFT; + } + } + + if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) { + if (val % 100) { + dev_err(priv->device, "tx-delay must be a multiple of 100\n"); + return -EINVAL; + } + val /= 100; + dev_dbg(priv->device, "set tx-delay to %x\n", val); + if (val <= SYSCON_ETXDC_MASK) { + reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT); + reg |= (val << SYSCON_ETXDC_SHIFT); + } else { + dev_err(priv->device, "Invalid TX clock delay: %d\n", + val); + return -EINVAL; + } + } + + if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) { + if (val % 100) { + dev_err(priv->device, "rx-delay must be a multiple of 100\n"); + return -EINVAL; + } + val /= 100; + dev_dbg(priv->device, "set rx-delay to %x\n", val); + if (val <= SYSCON_ERXDC_MASK) { + reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT); + reg |= (val << SYSCON_ERXDC_SHIFT); + } else { + dev_err(priv->device, "Invalid RX clock delay: %d\n", + val); + return -EINVAL; + } + } + + /* Clear interface mode bits */ + reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT); + if (gmac->variant->support_rmii) + reg &= ~SYSCON_RMII_EN; + + switch (priv->plat->interface) { + case PHY_INTERFACE_MODE_MII: + /* default */ + break; + case PHY_INTERFACE_MODE_RGMII: + reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; + break; + case PHY_INTERFACE_MODE_RMII: + reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII; + break; + default: + dev_err(priv->device, "Unsupported interface mode: %s", + phy_modes(priv->plat->interface)); + return -EINVAL; + } + + regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); + + return 0; +} + +static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac) +{ + u32 reg = gmac->variant->default_syscon_value; + + regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); +} + +static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv) +{ + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + int ret; + + if (!gmac->use_internal_phy) + return 0; + + ret = clk_prepare_enable(gmac->ephy_clk); + if (ret) { + dev_err(priv->device, "Cannot enable ephy\n"); + return ret; + } + + ret = reset_control_deassert(gmac->rst_ephy); + if (ret) { + dev_err(priv->device, "Cannot deassert ephy\n"); + clk_disable_unprepare(gmac->ephy_clk); + return ret; + } + + return 0; +} + +static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac) +{ + if (!gmac->use_internal_phy) + return 0; + + clk_disable_unprepare(gmac->ephy_clk); + reset_control_assert(gmac->rst_ephy); + return 0; +} + +/* sun8i_power_phy() - Activate the PHY. + * In case of error, there is no need to call sun8i_unpower_phy(); + * it will be called anyway by sun8i_dwmac_exit(). + */ +static int sun8i_power_phy(struct stmmac_priv *priv) +{ + int ret; + + ret = sun8i_dwmac_power_internal_phy(priv); + if (ret) + return ret; + + ret = sun8i_dwmac_set_syscon(priv); + if (ret) + return ret; + + /* After changing the syscon value, the MAC needs a reset, or it will + * keep using the last value (and so the last PHY setting).
+ */ + ret = sun8i_dwmac_reset(priv); + if (ret) + return ret; + return 0; +} + +static void sun8i_unpower_phy(struct sunxi_priv_data *gmac) +{ + sun8i_dwmac_unset_syscon(gmac); + sun8i_dwmac_unpower_internal_phy(gmac); +} + +static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct sunxi_priv_data *gmac = priv; + + sun8i_unpower_phy(gmac); + + clk_disable_unprepare(gmac->tx_clk); + + if (gmac->regulator) + regulator_disable(gmac->regulator); +} + +static const struct stmmac_ops sun8i_dwmac_ops = { + .core_init = sun8i_dwmac_core_init, + .set_mac = sun8i_dwmac_set_mac, + .dump_regs = sun8i_dwmac_dump_mac_regs, + .rx_ipc = sun8i_dwmac_rx_ipc_enable, + .set_filter = sun8i_dwmac_set_filter, + .flow_ctrl = sun8i_dwmac_flow_ctrl, + .set_umac_addr = sun8i_dwmac_set_umac_addr, + .get_umac_addr = sun8i_dwmac_get_umac_addr, +}; + +static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) +{ + struct mac_device_info *mac; + struct stmmac_priv *priv = ppriv; + int ret; + + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + if (!mac) + return NULL; + + ret = sun8i_power_phy(priv); + if (ret) + return NULL; + + mac->pcsr = priv->ioaddr; + mac->mac = &sun8i_dwmac_ops; + mac->dma = &sun8i_dwmac_dma_ops; + + /* The loopback bit seems to be re-set when link change + * Simply mask it each time + * Speed 10/100/1000 are set in BIT(2)/BIT(3) + */ + mac->link.speed_mask = GENMASK(3, 2) | EMAC_LOOPBACK; + mac->link.speed10 = EMAC_SPEED_10; + mac->link.speed100 = EMAC_SPEED_100; + mac->link.speed1000 = EMAC_SPEED_1000; + mac->link.duplex = EMAC_DUPLEX_FULL; + mac->mii.addr = EMAC_MDIO_CMD; + mac->mii.data = EMAC_MDIO_DATA; + mac->mii.reg_shift = 4; + mac->mii.reg_mask = GENMASK(8, 4); + mac->mii.addr_shift = 12; + mac->mii.addr_mask = GENMASK(16, 12); + mac->mii.clk_csr_shift = 20; + mac->mii.clk_csr_mask = GENMASK(22, 20); + mac->unicast_filter_entries = 8; + + /* Synopsys Id is not available */ + priv->synopsys_id = 0; + + return mac; +} + +static int sun8i_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct sunxi_priv_data *gmac; + struct device *dev = &pdev->dev; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return -ENOMEM; + + gmac->variant = of_device_get_match_data(&pdev->dev); + if (!gmac->variant) { + dev_err(&pdev->dev, "Missing dwmac-sun8i variant\n"); + return -EINVAL; + } + + gmac->tx_clk = devm_clk_get(dev, "stmmaceth"); + if (IS_ERR(gmac->tx_clk)) { + dev_err(dev, "Could not get TX clock\n"); + return PTR_ERR(gmac->tx_clk); + } + + /* Optional regulator for PHY */ + gmac->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(gmac->regulator)) { + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "No regulator found\n"); + gmac->regulator = NULL; + } + + gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "syscon"); + if (IS_ERR(gmac->regmap)) { + ret = PTR_ERR(gmac->regmap); + dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret); + return ret; + } + + plat_dat->interface = of_get_phy_mode(dev->of_node); + if (plat_dat->interface == gmac->variant->internal_phy) { + dev_info(&pdev->dev, "Will use internal PHY\n"); + gmac->use_internal_phy = true; + gmac->ephy_clk = 
of_clk_get(plat_dat->phy_node, 0); + if (IS_ERR(gmac->ephy_clk)) { + ret = PTR_ERR(gmac->ephy_clk); + dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret); + return -EINVAL; + } + + gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL); + if (IS_ERR(gmac->rst_ephy)) { + ret = PTR_ERR(gmac->rst_ephy); + if (ret == -EPROBE_DEFER) + return ret; + dev_err(&pdev->dev, "No EPHY reset control found %d\n", + ret); + return -EINVAL; + } + } else { + dev_info(&pdev->dev, "Will use external PHY\n"); + gmac->use_internal_phy = false; + } + + /* platform data specifying hardware features and callbacks. + * hardware features were copied from Allwinner drivers. + */ + plat_dat->rx_coe = STMMAC_RX_COE_TYPE2; + plat_dat->tx_coe = 1; + plat_dat->has_sun8i = true; + plat_dat->bsp_priv = gmac; + plat_dat->init = sun8i_dwmac_init; + plat_dat->exit = sun8i_dwmac_exit; + plat_dat->setup = sun8i_dwmac_setup; + + ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + sun8i_dwmac_exit(pdev, plat_dat->bsp_priv); + + return ret; +} + +static const struct of_device_id sun8i_dwmac_match[] = { + { .compatible = "allwinner,sun8i-h3-emac", + .data = &emac_variant_h3 }, + { .compatible = "allwinner,sun8i-a83t-emac", + .data = &emac_variant_a83t }, + { .compatible = "allwinner,sun50i-a64-emac", + .data = &emac_variant_a64 }, + { } +}; +MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); + +static struct platform_driver sun8i_dwmac_driver = { + .probe = sun8i_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "dwmac-sun8i", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = sun8i_dwmac_match, + }, +}; +module_platform_driver(sun8i_dwmac_driver); + +MODULE_AUTHOR("Corentin Labbe "); +MODULE_DESCRIPTION("Allwinner sun8i DWMAC specific glue layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c80c9c3b67db..68a188e74c54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -235,6 +235,17 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } + + if (priv->plat->has_sun8i) { + if (clk_rate > 160000000) + priv->clk_csr = 0x03; + else if (clk_rate > 80000000) + priv->clk_csr = 0x02; + else if (clk_rate > 40000000) + priv->clk_csr = 0x01; + else + priv->clk_csr = 0; + } } static void print_pkt(unsigned char *buf, int len) @@ -3955,6 +3966,10 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->hw = mac; + /* dwmac-sun8i only work in chain mode */ + if (priv->plat->has_sun8i) + chain_mode = 1; + /* To use the chained or ring mode */ if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->hw->mode = &dwmac4_ring_mode_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 7fc3a1ef395a..3840529344ed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -309,6 +309,12 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, struct device_node *np, struct device *dev) { bool mdio = true; + static const struct of_device_id need_mdio_ids[] = { + { .compatible = "snps,dwc-qos-ethernet-4.10" }, + { .compatible = "allwinner,sun8i-a83t-emac" }, + { .compatible = "allwinner,sun8i-h3-emac" }, + { 
.compatible = "allwinner,sun50i-a64-emac" }, + }; /* If phy-handle property is passed from DT, use it as the PHY */ plat->phy_node = of_parse_phandle(np, "phy-handle", 0); @@ -325,8 +331,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, mdio = false; } - /* exception for dwmac-dwc-qos-eth glue logic */ - if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { + if (of_match_node(need_mdio_ids, np)) { plat->mdio_node = of_get_child_by_name(np, "mdio"); } else { /** diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 8bb550bca96d..108739ff9223 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -186,6 +186,7 @@ struct plat_stmmacenet_data { struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; + bool has_sun8i; bool tso_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; -- cgit v1.2.3 From 681bdf80cff6844f81216b6b05516d82f69c23fd Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 May 2017 11:23:09 -0700 Subject: i40e/i40evf: create and use new unified header file This moves a header for i40evf to include/linux/avf/virtchnl.h. The directory name AVF is an acronym for the Intel(R) Adaptive Virtual Function. This first step creates the new file, which is a rename of drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h to include/linux/avf/virtchnl.h, and should show up in git as a rename when using git log --follow. To keep things building after the move, the changes to the i40evf driver are made to point to the new include file location. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- MAINTAINERS | 1 + drivers/net/ethernet/intel/i40evf/i40e_common.c | 2 +- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 2 +- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 446 --------------------- drivers/net/ethernet/intel/i40evf/i40evf.h | 2 +- include/linux/avf/virtchnl.h | 446 +++++++++++++++++++++ 6 files changed, 450 insertions(+), 449 deletions(-) delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h create mode 100644 include/linux/avf/virtchnl.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index 0fcb5e751ca7..6b7625ff9875 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6738,6 +6738,7 @@ F: Documentation/networking/i40e.txt F: Documentation/networking/i40evf.txt F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ +F: include/linux/avf/virtchnl.h INTEL RDMA RNIC DRIVER M: Faisal Latif diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 6729624fda5b..1db028ac96f4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -27,7 +27,7 @@ #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" -#include "i40e_virtchnl.h" +#include /** * i40e_set_mac_type - Sets MAC type diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 741223d5d809..227905b23690 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -29,7 +29,7 @@ #include "i40e_type.h" #include "i40e_alloc.h" -#include "i40e_virtchnl.h" +#include /* Prototypes for shared code functions that are not in * the standard function pointer structures. 
These are diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h deleted file mode 100644 index 7d6da3ac24f4..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ /dev/null @@ -1,446 +0,0 @@ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ - -#ifndef _I40E_VIRTCHNL_H_ -#define _I40E_VIRTCHNL_H_ - -/* Description: - * This header file describes the VF-PF communication protocol used - * by the various i40e drivers. - * - * Admin queue buffer usage: - * desc->opcode is always i40e_aqc_opc_send_msg_to_pf - * flags, retval, datalen, and data addr are all used normally. - * Firmware copies the cookie fields when sending messages between the PF and - * VF, but uses all other fields internally. Due to this limitation, we - * must send all messages as "indirect", i.e. using an external buffer. - * - * All the vsi indexes are relative to the VF. Each VF can have maximum of - * three VSIs. All the queue indexes are relative to the VSI. Each VF can - * have a maximum of sixteen queues for all of its VSIs. - * - * The PF is required to return a status code in v_retval for all messages - * except RESET_VF, which does not require any response. The return value is of - * i40e_status_code type, defined in the i40e_type.h. - * - * In general, VF driver initialization should roughly follow the order of these - * opcodes. The VF driver must first validate the API version of the PF driver, - * then request a reset, then get resources, then configure queues and - * interrupts. After these operations are complete, the VF driver may start - * its queues, optionally add MAC and VLAN filters, and process traffic. - */ - -/* Opcodes for VF-PF communication. These are placed in the v_opcode field - * of the virtchnl_msg structure. - */ -enum i40e_virtchnl_ops { -/* The PF sends status change events to VFs using - * the I40E_VIRTCHNL_OP_EVENT opcode. - * VFs send requests to the PF using the other ops. 
- */ - I40E_VIRTCHNL_OP_UNKNOWN = 0, - I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF = 2, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, - I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, - I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, - I40E_VIRTCHNL_OP_ADD_VLAN = 12, - I40E_VIRTCHNL_OP_DEL_VLAN = 13, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, - I40E_VIRTCHNL_OP_GET_STATS = 15, - I40E_VIRTCHNL_OP_RSVD = 16, - I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ - I40E_VIRTCHNL_OP_IWARP = 20, - I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, - I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, - I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, - I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, - I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, - I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, - -}; - -/* Virtual channel message descriptor. This overlays the admin queue - * descriptor. All other data is passed in external buffers. - */ - -struct i40e_virtchnl_msg { - u8 pad[8]; /* AQ flags/opcode/len/retval fields */ - enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ - i40e_status v_retval; /* ditto for desc->retval */ - u32 vfid; /* used by PF when sending to VF */ -}; - -/* Message descriptions and data structures.*/ - -/* I40E_VIRTCHNL_OP_VERSION - * VF posts its version number to the PF. PF responds with its version number - * in the same format, along with a return code. - * Reply from PF has its major/minor versions also in param0 and param1. - * If there is a major version mismatch, then the VF cannot operate. - * If there is a minor version mismatch, then the VF can operate but should - * add a warning to the system log. - * - * This enum element MUST always be specified as == 1, regardless of other - * changes in the API. The PF must always respond to this message without - * error regardless of version mismatch. - */ -#define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 - -struct i40e_virtchnl_version_info { - u32 major; - u32 minor; -}; - -/* I40E_VIRTCHNL_OP_RESET_VF - * VF sends this request to PF with no parameters - * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register - * until reset completion is indicated. The admin queue must be reinitialized - * after this operation. - * - * When reset is complete, PF must ensure that all queues in all VSIs associated - * with the VF are stopped, all queue configurations in the HMC are set to 0, - * and all MAC and VLAN filters (except the default MAC address) on all VSIs - * are cleared. - */ - -/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * Version 1.0 VF sends this request to PF with no parameters - * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities - * PF responds with an indirect message containing - * i40e_virtchnl_vf_resource and one or more - * i40e_virtchnl_vsi_resource structures. 
- */ - -struct i40e_virtchnl_vsi_resource { - u16 vsi_id; - u16 num_queue_pairs; - enum i40e_vsi_type vsi_type; - u16 qset_handle; - u8 default_mac_addr[ETH_ALEN]; -}; -/* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 -#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 - -#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \ - I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \ - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) - -struct i40e_virtchnl_vf_resource { - u16 num_vsis; - u16 num_queue_pairs; - u16 max_vectors; - u16 max_mtu; - - u32 vf_offload_flags; - u32 rss_key_size; - u32 rss_lut_size; - - struct i40e_virtchnl_vsi_resource vsi_res[1]; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE - * VF sends this message to set up parameters for one TX queue. - * External data buffer contains one instance of i40e_virtchnl_txq_info. - * PF configures requested queue and returns a status code. - */ - -/* Tx queue config info */ -struct i40e_virtchnl_txq_info { - u16 vsi_id; - u16 queue_id; - u16 ring_len; /* number of descriptors, multiple of 8 */ - u16 headwb_enabled; - u64 dma_ring_addr; - u64 dma_headwb_addr; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE - * VF sends this message to set up parameters for one RX queue. - * External data buffer contains one instance of i40e_virtchnl_rxq_info. - * PF configures requested queue and returns a status code. - */ - -/* Rx queue config info */ -struct i40e_virtchnl_rxq_info { - u16 vsi_id; - u16 queue_id; - u32 ring_len; /* number of descriptors, multiple of 32 */ - u16 hdr_size; - u16 splithdr_enabled; - u32 databuffer_size; - u32 max_pkt_size; - u64 dma_ring_addr; - enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES - * VF sends this message to set parameters for all active TX and RX queues - * associated with the specified VSI. - * PF configures queues and returns status. - * If the number of queues specified is greater than the number of queues - * associated with the VSI, an error is returned and no queues are configured. - */ -struct i40e_virtchnl_queue_pair_info { - /* NOTE: vsi_id and queue_id should be identical for both queues. */ - struct i40e_virtchnl_txq_info txq; - struct i40e_virtchnl_rxq_info rxq; -}; - -struct i40e_virtchnl_vsi_queue_config_info { - u16 vsi_id; - u16 num_queue_pairs; - struct i40e_virtchnl_queue_pair_info qpair[1]; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP - * VF uses this message to map vectors to queues. - * The rxq_map and txq_map fields are bitmaps used to indicate which queues - * are to be associated with the specified vector. - * The "other" causes are always mapped to vector 0. - * PF configures interrupt mapping and returns status. 
- */ -struct i40e_virtchnl_vector_map { - u16 vsi_id; - u16 vector_id; - u16 rxq_map; - u16 txq_map; - u16 rxitr_idx; - u16 txitr_idx; -}; - -struct i40e_virtchnl_irq_map_info { - u16 num_vectors; - struct i40e_virtchnl_vector_map vecmap[1]; -}; - -/* I40E_VIRTCHNL_OP_ENABLE_QUEUES - * I40E_VIRTCHNL_OP_DISABLE_QUEUES - * VF sends these message to enable or disable TX/RX queue pairs. - * The queues fields are bitmaps indicating which queues to act upon. - * (Currently, we only support 16 queues per VF, but we make the field - * u32 to allow for expansion.) - * PF performs requested action and returns status. - */ -struct i40e_virtchnl_queue_select { - u16 vsi_id; - u16 pad; - u32 rx_queues; - u32 tx_queues; -}; - -/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS - * VF sends this message in order to add one or more unicast or multicast - * address filters for the specified VSI. - * PF adds the filters and returns status. - */ - -/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS - * VF sends this message in order to remove one or more unicast or multicast - * filters for the specified VSI. - * PF removes the filters and returns status. - */ - -struct i40e_virtchnl_ether_addr { - u8 addr[ETH_ALEN]; - u8 pad[2]; -}; - -struct i40e_virtchnl_ether_addr_list { - u16 vsi_id; - u16 num_elements; - struct i40e_virtchnl_ether_addr list[1]; -}; - -/* I40E_VIRTCHNL_OP_ADD_VLAN - * VF sends this message to add one or more VLAN tag filters for receives. - * PF adds the filters and returns status. - * If a port VLAN is configured by the PF, this operation will return an - * error to the VF. - */ - -/* I40E_VIRTCHNL_OP_DEL_VLAN - * VF sends this message to remove one or more VLAN tag filters for receives. - * PF removes the filters and returns status. - * If a port VLAN is configured by the PF, this operation will return an - * error to the VF. - */ - -struct i40e_virtchnl_vlan_filter_list { - u16 vsi_id; - u16 num_elements; - u16 vlan_id[1]; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE - * VF sends VSI id and flags. - * PF returns status code in retval. - * Note: we assume that broadcast accept mode is always enabled. - */ -struct i40e_virtchnl_promisc_info { - u16 vsi_id; - u16 flags; -}; - -#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 -#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 - -/* I40E_VIRTCHNL_OP_GET_STATS - * VF sends this message to request stats for the selected VSI. VF uses - * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id - * field is ignored by the PF. - * - * PF replies with struct i40e_eth_stats in an external buffer. - */ - -/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY - * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT - * VF sends these messages to configure RSS. Only supported if both PF - * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during - * configuration negotiation. If this is the case, then the RSS fields in - * the VF resource struct are valid. - * Both the key and LUT are initialized to 0 by the PF, meaning that - * RSS is effectively disabled until set up by the VF. - */ -struct i40e_virtchnl_rss_key { - u16 vsi_id; - u16 key_len; - u8 key[1]; /* RSS hash key, packed bytes */ -}; - -struct i40e_virtchnl_rss_lut { - u16 vsi_id; - u16 lut_entries; - u8 lut[1]; /* RSS lookup table*/ -}; - -/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS - * I40E_VIRTCHNL_OP_SET_RSS_HENA - * VF sends these messages to get and set the hash filter enable bits for RSS. - * By default, the PF sets these to all possible traffic types that the - * hardware supports. 
The VF can query this value if it wants to change the - * traffic types that are hashed by the hardware. - * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h - */ -struct i40e_virtchnl_rss_hena { - u64 hena; -}; - -/* I40E_VIRTCHNL_OP_EVENT - * PF sends this message to inform the VF driver of events that may affect it. - * No direct response is expected from the VF, though it may generate other - * messages in response to this one. - */ -enum i40e_virtchnl_event_codes { - I40E_VIRTCHNL_EVENT_UNKNOWN = 0, - I40E_VIRTCHNL_EVENT_LINK_CHANGE, - I40E_VIRTCHNL_EVENT_RESET_IMPENDING, - I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, -}; -#define I40E_PF_EVENT_SEVERITY_INFO 0 -#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 - -struct i40e_virtchnl_pf_event { - enum i40e_virtchnl_event_codes event; - union { - struct { - enum i40e_aq_link_speed link_speed; - bool link_status; - } link_event; - } event_data; - - int severity; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP - * VF uses this message to request PF to map IWARP vectors to IWARP queues. - * The request for this originates from the VF IWARP driver through - * a client interface between VF LAN and VF IWARP driver. - * A vector could have an AEQ and CEQ attached to it although - * there is a single AEQ per VF IWARP instance in which case - * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. - * There will never be a case where there will be multiple CEQs attached - * to a single vector. - * PF configures interrupt mapping and returns status. - */ - -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. - */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 -#define I40E_QUEUE_INVALID_IDX 0xFFFF - -struct i40e_virtchnl_iwarp_qv_info { - u32 v_idx; /* msix_vector */ - u16 ceq_idx; - u16 aeq_idx; - u8 itr_idx; -}; - -struct i40e_virtchnl_iwarp_qvlist_info { - u32 num_vectors; - struct i40e_virtchnl_iwarp_qv_info qv_info[1]; -}; - -/* VF reset states - these are written into the RSTAT register: - * I40E_VFGEN_RSTAT1 on the PF - * I40E_VFGEN_RSTAT on the VF - * When the PF initiates a reset, it writes 0 - * When the reset is complete, it writes 1 - * When the PF detects that the VF has recovered, it writes 2 - * VF checks this register periodically to determine if a reset has occurred, - * then polls it to know when the reset is complete. - * If either the PF or VF reads the register while the hardware - * is in a reset state, it will return DEADBEEF, which, when masked - * will result in 3. - */ -enum i40e_vfr_states { - I40E_VFR_INPROGRESS = 0, - I40E_VFR_COMPLETED, - I40E_VFR_VFACTIVE, - I40E_VFR_UNKNOWN, -}; - -#endif /* _I40E_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index b8ada6d8d890..75d314b1a9bb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -43,7 +43,7 @@ #include #include "i40e_type.h" -#include "i40e_virtchnl.h" +#include #include "i40e_txrx.h" #define DEFAULT_DEBUG_LEVEL_SHIFT 3 diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h new file mode 100644 index 000000000000..7d6da3ac24f4 --- /dev/null +++ b/include/linux/avf/virtchnl.h @@ -0,0 +1,446 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_VIRTCHNL_H_ +#define _I40E_VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the various i40e drivers. + * + * Admin queue buffer usage: + * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * Firmware copies the cookie fields when sending messages between the PF and + * VF, but uses all other fields internally. Due to this limitation, we + * must send all messages as "indirect", i.e. using an external buffer. + * + * All the vsi indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value is of + * i40e_status_code type, defined in the i40e_type.h. + * + * In general, VF driver initialization should roughly follow the order of these + * opcodes. The VF driver must first validate the API version of the PF driver, + * then request a reset, then get resources, then configure queues and + * interrupts. After these operations are complete, the VF driver may start + * its queues, optionally add MAC and VLAN filters, and process traffic. + */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum i40e_virtchnl_ops { +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. 
+ */ + I40E_VIRTCHNL_OP_UNKNOWN = 0, + I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_RSVD = 16, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_IWARP = 20, + I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, + I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + +}; + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct i40e_virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + i40e_status v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +/* Message descriptions and data structures.*/ + +/* I40E_VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define I40E_VIRTCHNL_VERSION_MAJOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct i40e_virtchnl_version_info { + u32 major; + u32 minor; +}; + +/* I40E_VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * i40e_virtchnl_vf_resource and one or more + * i40e_virtchnl_vsi_resource structures. 
+ */
+
+struct i40e_virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	enum i40e_vsi_type vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[ETH_ALEN];
+};
+
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR	0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP		0x00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM	0x00200000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+				    I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+				    I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct i40e_virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_offload_flags;
+	u32 rss_key_size;
+	u32 rss_lut_size;
+
+	struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled;
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled;
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u64 dma_ring_addr;
+	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct i40e_virtchnl_txq_info txq;
+	struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
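+ * For example, a vector_map entry with vector_id = 1, rxq_map = 0x0003
+ * and txq_map = 0x0001 associates RX queues 0 and 1 and TX queue 0 of
+ * the given VSI with vector 1.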
+ */
+struct i40e_virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+	u8 addr[ETH_ALEN];
+	u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC	0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+	u16 vsi_id;
+	u16 key_len;
+	u8 key[1];		/* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+	u16 vsi_id;
+	u16 lut_entries;
+	u8 lut[1];		/* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports.
The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h.
+ */
+struct i40e_virtchnl_rss_hena {
+	u64 hena;
+};
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO		0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct i40e_virtchnl_pf_event {
+	enum i40e_virtchnl_event_codes event;
+	union {
+		struct {
+			enum i40e_aq_link_speed link_speed;
+			bool link_status;
+		} link_event;
+	} event_data;
+
+	int severity;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request the PF to map IWARP vectors to IWARP
+ * queues. The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and a CEQ attached to it, although there is a
+ * single AEQ per VF IWARP instance, in which case most vectors will have an
+ * INVALID_IDX for aeq and a valid idx for ceq.
+ * There will never be a case where multiple CEQs are attached to a single
+ * vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ	0x80
+#define I40E_QUEUE_INVALID_IDX	0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+	u32 v_idx; /* msix_vector */
+	u16 ceq_idx;
+	u16 aeq_idx;
+	u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+	u32 num_vectors;
+	struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0.
+ * When the reset is complete, it writes 1.
+ * When the PF detects that the VF has recovered, it writes 2.
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+	I40E_VFR_INPROGRESS = 0,
+	I40E_VFR_COMPLETED,
+	I40E_VFR_VFACTIVE,
+	I40E_VFR_UNKNOWN,
+};
+
+#endif /* _I40E_VIRTCHNL_H_ */
-- cgit v1.2.3 

From 1410a90ae449061b7e1ae19d275148f36948801b Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Sun, 28 May 2017 10:53:10 +0300
Subject: net/mlx5: Define interface bits for fencing UMR wqe

HW can implement UMR wqe re-transmission in various ways.
Thus, add an HCA cap to distinguish the needed fence for UMR, to make
sure that the wqe wouldn't fail on mkey checks.
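For illustration, a consumer of the new capability bits might select the
WQE fence mode like this (a minimal sketch; only the MLX5_CAP_UMR_FENCE_*
values come from this patch, while the helper name, the fence-mode names,
and the way the capability is read are assumptions):

----8<----
/* Map the reported umr_fence HCA capability (see MLX5_CAP_UMR_FENCE_*
 * in this patch) to the fence mode a driver would set in its UMR WQEs.
 * All names here are hypothetical; the sketch only shows the intent of
 * the capability.
 */
enum umr_fence_mode {
	UMR_FENCE_MODE_STRONG_ORDERING,	/* strongest fence, always safe */
	UMR_FENCE_MODE_SMALL_AND_FENCE,	/* lighter fence is sufficient */
	UMR_FENCE_MODE_NONE,		/* no fence needed */
};

static enum umr_fence_mode get_umr_fence(unsigned char umr_fence_cap)
{
	switch (umr_fence_cap) {
	case 0x0:	/* MLX5_CAP_UMR_FENCE_STRONG */
		return UMR_FENCE_MODE_STRONG_ORDERING;
	case 0x1:	/* MLX5_CAP_UMR_FENCE_SMALL */
		return UMR_FENCE_MODE_SMALL_AND_FENCE;
	default:	/* MLX5_CAP_UMR_FENCE_NONE */
		return UMR_FENCE_MODE_NONE;
	}
}
----8<----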
Signed-off-by: Max Gurtovoy Acked-by: Leon Romanovsky Reviewed-by: Christoph Hellwig Signed-off-by: Doug Ledford --- include/linux/mlx5/mlx5_ifc.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 32de0724b400..edafedb7b509 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -766,6 +766,12 @@ enum { MLX5_CAP_PORT_TYPE_ETH = 0x1, }; +enum { + MLX5_CAP_UMR_FENCE_STRONG = 0x0, + MLX5_CAP_UMR_FENCE_SMALL = 0x1, + MLX5_CAP_UMR_FENCE_NONE = 0x2, +}; + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x80]; @@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_202[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_205[0xa]; + u8 reserved_at_205[0x5]; + u8 umr_fence[0x2]; + u8 reserved_at_20c[0x3]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; -- cgit v1.2.3 From 310a2ad92e3fd9139e3641464f1de113fa89825b Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 May 2017 11:23:11 -0700 Subject: virtchnl: rename i40e to generic virtchnl This morphs all the i40e and i40evf references to/in virtchnl.h to be generic, using only automated methods. Updates all the callers to use the new names. A followup patch provides separate clean ups for messy line conversions from these "automatic" changes, to make them more reviewable. Was executed with the following sed script: sed -i -f transform_script drivers/net/ethernet/intel/i40e/i40e_client.c sed -i -f transform_script drivers/net/ethernet/intel/i40e/i40e_prototype.h sed -i -f transform_script drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c sed -i -f transform_script drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h sed -i -f transform_script drivers/net/ethernet/intel/i40evf/i40e_common.c sed -i -f transform_script drivers/net/ethernet/intel/i40evf/i40e_prototype.h sed -i -f transform_script drivers/net/ethernet/intel/i40evf/i40evf.h sed -i -f transform_script drivers/net/ethernet/intel/i40evf/i40evf_client.c sed -i -f transform_script drivers/net/ethernet/intel/i40evf/i40evf_main.c sed -i -f transform_script drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c sed -i -f transform_script include/linux/avf/virtchnl.h transform_script: ----8<---- s/I40E_VIRTCHNL_SUPPORTED_QTYPES/SAVE_ME_SUPPORTED_QTYPES/g s/I40E_VIRTCHNL_VF_CAP/SAVE_ME_VF_CAP/g s/I40E_VIRTCHNL_/VIRTCHNL_/g s/i40e_virtchnl_/virtchnl_/g s/i40e_vfr_/virtchnl_vfr_/g s/I40E_VFR_/VIRTCHNL_VFR_/g s/VIRTCHNL_OP_ADD_ETHER_ADDRESS/VIRTCHNL_OP_ADD_ETH_ADDR/g s/VIRTCHNL_OP_DEL_ETHER_ADDRESS/VIRTCHNL_OP_DEL_ETH_ADDR/g s/VIRTCHNL_OP_FCOE/VIRTCHNL_OP_RSVD/g s/SAVE_ME_SUPPORTED_QTYPES/I40E_VIRTCHNL_SUPPORTED_QTYPES/g s/SAVE_ME_VF_CAP/I40E_VIRTCHNL_VF_CAP/g ----8<---- Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_client.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 4 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 370 ++++++++++----------- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 6 +- drivers/net/ethernet/intel/i40evf/i40e_common.c | 10 +- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 4 +- drivers/net/ethernet/intel/i40evf/i40evf.h | 22 +- drivers/net/ethernet/intel/i40evf/i40evf_client.c | 18 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 48 +-- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 280 ++++++++-------- include/linux/avf/virtchnl.h | 233 
++++++------- 11 files changed, 499 insertions(+), 498 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 088b4a43bd2a..36f694ccdc09 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -565,7 +565,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev, struct i40e_hw *hw = &pf->hw; i40e_status err; - err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP, + err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP, 0, msg, len, NULL); if (err) dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index d9c555050e64..df613ea40313 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -333,10 +333,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct i40e_virtchnl_vf_resource *msg); + struct virtchnl_vf_resource *msg); i40e_status i40e_vf_reset(struct i40e_hw *hw); i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 95c23fbaa211..9f361e810990 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -39,7 +39,7 @@ * send a message to all VFs on a given PF **/ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen) { @@ -70,13 +70,13 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, **/ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) { - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *ls = &pf->hw.phy.link_info; int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; - pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; @@ -87,7 +87,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) ls->link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = ls->link_speed; } - i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -113,12 +113,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) **/ void i40e_vc_notify_reset(struct i40e_pf *pf) { - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; - pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); + i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } /** @@ -129,7 +129,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) **/ void 
i40e_vc_notify_vf_reset(struct i40e_vf *vf) { - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; int abs_vf_id; /* validate the request */ @@ -143,11 +143,11 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; - pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, - sizeof(struct i40e_virtchnl_pf_event), NULL); + sizeof(struct virtchnl_pf_event), NULL); } /***********************misc routines*****************************/ @@ -250,7 +250,7 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, * configure irq link list from the map **/ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, - struct i40e_virtchnl_vector_map *vecmap) + struct virtchnl_vector_map *vecmap) { unsigned long linklistmap = 0, tempmap; struct i40e_pf *pf = vf->pf; @@ -338,7 +338,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, /* if the vf is running in polling mode and using interrupt zero, * need to disable auto-mask on enabling zero interrupt for VFs. */ - if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) && + if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && (vector_id == 0)) { reg = rd32(hw, I40E_GLINT_CTL); if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { @@ -359,7 +359,7 @@ irq_list_done: static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; + struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; u32 msix_vf; u32 i; @@ -368,7 +368,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) msix_vf = pf->hw.func_caps.num_msix_vectors_vf; for (i = 0; i < qvlist_info->num_vectors; i++) { - struct i40e_virtchnl_iwarp_qv_info *qv_info; + struct virtchnl_iwarp_qv_info *qv_info; u32 next_q_index, next_q_type; struct i40e_hw *hw = &pf->hw; u32 v_idx, reg_idx, reg; @@ -409,17 +409,17 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) * Return 0 on success or < 0 on error **/ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info) + struct virtchnl_iwarp_qvlist_info *qvlist_info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - struct i40e_virtchnl_iwarp_qv_info *qv_info; + struct virtchnl_iwarp_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; u32 msix_vf, size; - size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + - (sizeof(struct i40e_virtchnl_iwarp_qv_info) * + size = sizeof(struct virtchnl_iwarp_qvlist_info) + + (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); vf->qvlist_info = kzalloc(size, GFP_KERNEL); vf->qvlist_info->num_vectors = qvlist_info->num_vectors; @@ -492,7 +492,7 @@ err: **/ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, - struct i40e_virtchnl_txq_info *info) + struct virtchnl_txq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; @@ -569,7 +569,7 @@ error_context: **/ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, - struct i40e_virtchnl_rxq_info *info) + struct virtchnl_rxq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; @@ 
-1017,7 +1017,7 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) * after VF has been fully initialized, because the VF driver may * request resources immediately after setting this flag. */ - wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); } /** @@ -1461,7 +1461,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, * send resp msg to VF **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, - enum i40e_virtchnl_ops opcode, + enum virtchnl_ops opcode, i40e_status retval) { return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); @@ -1475,18 +1475,18 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, **/ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) { - struct i40e_virtchnl_version_info info = { - I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR + struct virtchnl_version_info info = { + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR }; - vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg; + vf->vf_ver = *(struct virtchnl_version_info *)msg; /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ if (VF_IS_V10(vf)) - info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; - return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, (u8 *)&info, sizeof(struct - i40e_virtchnl_version_info)); + virtchnl_version_info)); } /** @@ -1499,7 +1499,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) **/ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { - struct i40e_virtchnl_vf_resource *vfres = NULL; + struct virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; struct i40e_vsi *vsi; @@ -1512,8 +1512,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) goto err; } - len = (sizeof(struct i40e_virtchnl_vf_resource) + - sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); + len = (sizeof(struct virtchnl_vf_resource) + + sizeof(struct virtchnl_vsi_resource) * num_vsis); vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { @@ -1524,47 +1524,47 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (VF_IS_V11(vf)) vf->driver_caps = *(u32 *)msg; else - vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN; - vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; + vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; if (i40e_vf_client_capable(pf, vf->vf_id) && - (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); } - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && - (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)) + (vf->driver_caps & 
VIRTCHNL_VF_OFFLOAD_RSS_AQ)) vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + VIRTCHNL_VF_OFFLOAD_RSS_AQ; else vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + VIRTCHNL_VF_OFFLOAD_RSS_REG; } if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && - (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", @@ -1572,13 +1572,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ret = I40E_ERR_PARAM; goto err; } - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; } if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; } vfres->num_vsis = num_vsis; @@ -1601,7 +1601,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) err: /* send the response back to the VF */ - ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, (u8 *)vfres, len); kfree(vfres); @@ -1655,8 +1655,8 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_promisc_info *info = - (struct i40e_virtchnl_promisc_info *)msg; + struct virtchnl_promisc_info *info = + (struct virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_mac_filter *f; @@ -1788,7 +1788,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, aq_ret); } @@ -1803,9 +1803,9 @@ error_param: **/ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_vsi_queue_config_info *qci = - (struct i40e_virtchnl_vsi_queue_config_info *)msg; - struct i40e_virtchnl_queue_pair_info *qpi; + struct virtchnl_vsi_queue_config_info *qci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *qpi; struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id; i40e_status aq_ret = 0; @@ -1845,7 +1845,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return 
i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret); } @@ -1860,9 +1860,9 @@ error_param: **/ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_irq_map_info *irqmap_info = - (struct i40e_virtchnl_irq_map_info *)msg; - struct i40e_virtchnl_vector_map *map; + struct virtchnl_irq_map_info *irqmap_info = + (struct virtchnl_irq_map_info *)msg; + struct virtchnl_vector_map *map; u16 vsi_id, vsi_queue_id, vector_id; i40e_status aq_ret = 0; unsigned long tempmap; @@ -1908,7 +1908,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret); } @@ -1922,8 +1922,8 @@ error_param: **/ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_queue_select *vqs = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; @@ -1947,7 +1947,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_TIMEOUT; error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret); } @@ -1962,8 +1962,8 @@ error_param: **/ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_queue_select *vqs = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; @@ -1986,7 +1986,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret); } @@ -2000,8 +2000,8 @@ error_param: **/ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_queue_select *vqs = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; struct i40e_eth_stats stats; i40e_status aq_ret = 0; @@ -2029,7 +2029,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response back to the VF */ - return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, (u8 *)&stats, sizeof(stats)); } @@ -2088,8 +2088,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) **/ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_ether_addr_list *al = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = al->vsi_id; @@ -2143,7 +2143,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + return 
i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, ret); } @@ -2157,8 +2157,8 @@ error_param: **/ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_ether_addr_list *al = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = al->vsi_id; @@ -2203,7 +2203,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); } @@ -2217,8 +2217,8 @@ error_param: **/ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_vlan_filter_list *vfl = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vfl->vsi_id; @@ -2277,7 +2277,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); } /** @@ -2290,8 +2290,8 @@ error_param: **/ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_vlan_filter_list *vfl = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vfl->vsi_id; @@ -2335,7 +2335,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); } /** @@ -2363,7 +2363,7 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, aq_ret); } @@ -2379,8 +2379,8 @@ error_param: static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, bool config) { - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = - (struct i40e_virtchnl_iwarp_qvlist_info *)msg; + struct virtchnl_iwarp_qvlist_info *qvlist_info = + (struct virtchnl_iwarp_qvlist_info *)msg; i40e_status aq_ret = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || @@ -2399,8 +2399,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, - config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : - I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + config ? 
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, aq_ret); } @@ -2414,8 +2414,8 @@ error_param: **/ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_key *vrk = - (struct i40e_virtchnl_rss_key *)msg; + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vrk->vsi_id; @@ -2432,7 +2432,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); err: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret); } @@ -2446,8 +2446,8 @@ err: **/ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_lut *vrl = - (struct i40e_virtchnl_rss_lut *)msg; + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vrl->vsi_id; @@ -2464,7 +2464,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); /* send the response to the VF */ err: - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret); } @@ -2478,7 +2478,7 @@ err: **/ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_hena *vrh = NULL; + struct virtchnl_rss_hena *vrh = NULL; struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; int len = 0; @@ -2487,7 +2487,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_PARAM; goto err; } - len = sizeof(struct i40e_virtchnl_rss_hena); + len = sizeof(struct virtchnl_rss_hena); vrh = kzalloc(len, GFP_KERNEL); if (!vrh) { @@ -2498,7 +2498,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) vrh->hena = i40e_pf_get_default_rss_hena(pf); err: /* send the response back to the VF */ - aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, aq_ret, (u8 *)vrh, len); kfree(vrh); return aq_ret; @@ -2514,8 +2514,8 @@ err: **/ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_hena *vrh = - (struct i40e_virtchnl_rss_hena *)msg; + struct virtchnl_rss_hena *vrh = + (struct virtchnl_rss_hena *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; @@ -2530,7 +2530,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) /* send the response to the VF */ err: - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); } @@ -2555,78 +2555,78 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, /* Validate message length. 
*/ switch (v_opcode) { - case I40E_VIRTCHNL_OP_VERSION: - valid_len = sizeof(struct i40e_virtchnl_version_info); + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); break; - case I40E_VIRTCHNL_OP_RESET_VF: + case VIRTCHNL_OP_RESET_VF: break; - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + case VIRTCHNL_OP_GET_VF_RESOURCES: if (VF_IS_V11(vf)) valid_len = sizeof(u32); break; - case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: - valid_len = sizeof(struct i40e_virtchnl_txq_info); + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); break; - case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: - valid_len = sizeof(struct i40e_virtchnl_rxq_info); + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: - valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); if (msglen >= valid_len) { - struct i40e_virtchnl_vsi_queue_config_info *vqc = - (struct i40e_virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; valid_len += (vqc->num_queue_pairs * sizeof(struct - i40e_virtchnl_queue_pair_info)); + virtchnl_queue_pair_info)); if (vqc->num_queue_pairs == 0) err_msg_format = true; } break; - case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: - valid_len = sizeof(struct i40e_virtchnl_irq_map_info); + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); if (msglen >= valid_len) { - struct i40e_virtchnl_irq_map_info *vimi = - (struct i40e_virtchnl_irq_map_info *)msg; + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; valid_len += (vimi->num_vectors * - sizeof(struct i40e_virtchnl_vector_map)); + sizeof(struct virtchnl_vector_map)); if (vimi->num_vectors == 0) err_msg_format = true; } break; - case I40E_VIRTCHNL_OP_ENABLE_QUEUES: - case I40E_VIRTCHNL_OP_DISABLE_QUEUES: - valid_len = sizeof(struct i40e_virtchnl_queue_select); + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: - valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); if (msglen >= valid_len) { - struct i40e_virtchnl_ether_addr_list *veal = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; valid_len += veal->num_elements * - sizeof(struct i40e_virtchnl_ether_addr); + sizeof(struct virtchnl_ether_addr); if (veal->num_elements == 0) err_msg_format = true; } break; - case I40E_VIRTCHNL_OP_ADD_VLAN: - case I40E_VIRTCHNL_OP_DEL_VLAN: - valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); if (msglen >= valid_len) { - struct i40e_virtchnl_vlan_filter_list *vfl = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; valid_len += vfl->num_elements * sizeof(u16); if (vfl->num_elements == 0) err_msg_format = true; } break; - case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - valid_len = sizeof(struct i40e_virtchnl_promisc_info); + case 
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); break; - case I40E_VIRTCHNL_OP_GET_STATS: - valid_len = sizeof(struct i40e_virtchnl_queue_select); + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); break; - case I40E_VIRTCHNL_OP_IWARP: + case VIRTCHNL_OP_IWARP: /* These messages are opaque to us and will be validated in * the RDMA client code. We just need to check for nonzero * length. The firmware will enforce max length restrictions. @@ -2636,27 +2636,27 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, else err_msg_format = true; break; - case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: valid_len = 0; break; - case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info); + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); if (msglen >= valid_len) { - struct i40e_virtchnl_iwarp_qvlist_info *qv = - (struct i40e_virtchnl_iwarp_qvlist_info *)msg; + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; if (qv->num_vectors == 0) { err_msg_format = true; break; } valid_len += ((qv->num_vectors - 1) * - sizeof(struct i40e_virtchnl_iwarp_qv_info)); + sizeof(struct virtchnl_iwarp_qv_info)); } break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: - valid_len = sizeof(struct i40e_virtchnl_rss_key); + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); if (msglen >= valid_len) { - struct i40e_virtchnl_rss_key *vrk = - (struct i40e_virtchnl_rss_key *)msg; + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) { err_msg_format = true; break; @@ -2664,11 +2664,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len += vrk->key_len - 1; } break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: - valid_len = sizeof(struct i40e_virtchnl_rss_lut); + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); if (msglen >= valid_len) { - struct i40e_virtchnl_rss_lut *vrl = - (struct i40e_virtchnl_rss_lut *)msg; + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { err_msg_format = true; break; @@ -2676,14 +2676,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len += vrl->lut_entries - 1; } break; - case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: break; - case I40E_VIRTCHNL_OP_SET_RSS_HENA: - valid_len = sizeof(struct i40e_virtchnl_rss_hena); + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); break; /* These are always errors coming from the VF. 
*/ - case I40E_VIRTCHNL_OP_EVENT: - case I40E_VIRTCHNL_OP_UNKNOWN: + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: default: return -EPERM; } @@ -2729,70 +2729,70 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, } switch (v_opcode) { - case I40E_VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_VERSION: ret = i40e_vc_get_version_msg(vf, msg); break; - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + case VIRTCHNL_OP_GET_VF_RESOURCES: ret = i40e_vc_get_vf_resources_msg(vf, msg); break; - case I40E_VIRTCHNL_OP_RESET_VF: + case VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); ret = 0; break; - case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ret = i40e_vc_config_queues_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_ENABLE_QUEUES: ret = i40e_vc_enable_queues_msg(vf, msg, msglen); i40e_vc_notify_vf_link_state(vf); break; - case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: ret = i40e_vc_disable_queues_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case VIRTCHNL_OP_ADD_ETH_ADDR: ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + case VIRTCHNL_OP_DEL_ETH_ADDR: ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_ADD_VLAN: ret = i40e_vc_add_vlan_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_DEL_VLAN: + case VIRTCHNL_OP_DEL_VLAN: ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_GET_STATS: + case VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_IWARP: + case VIRTCHNL_OP_IWARP: ret = i40e_vc_iwarp_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); break; - case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: + case VIRTCHNL_OP_CONFIG_RSS_KEY: ret = i40e_vc_config_rss_key(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: + case VIRTCHNL_OP_CONFIG_RSS_LUT: ret = i40e_vc_config_rss_lut(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: ret = i40e_vc_get_rss_hena(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_SET_RSS_HENA: + case VIRTCHNL_OP_SET_RSS_HENA: ret = i40e_vc_set_rss_hena(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_UNKNOWN: + case VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); @@ -3218,7 +3218,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; int abs_vf_id; @@ -3234,7 +3234,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf = &pf->vf[vf_id]; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - pfe.event = 
I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; switch (link) { @@ -3262,7 +3262,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) goto error_out; } /* Notify the VF of its new link state */ - i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); error_out: diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 20d7c8160e9e..b57ffffce141 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -81,13 +81,13 @@ struct i40e_vf { s16 vf_id; /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; - struct i40e_virtchnl_version_info vf_ver; + struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ /* VF Port Extender (PE) stag if used */ u16 stag; - struct i40e_virtchnl_ether_addr default_lan_addr; + struct virtchnl_ether_addr default_lan_addr; u16 port_vlan_id; bool pf_set_mac; /* The VMM admin set the VF MAC address */ bool trusted; @@ -115,7 +115,7 @@ struct i40e_vf { u16 num_vlan; /* RDMA Client */ - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info; + struct virtchnl_iwarp_qvlist_info *qvlist_info; }; void i40e_free_vfs(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 1db028ac96f4..9a7d995080b6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1054,7 +1054,7 @@ do_retry: * completion before returning. **/ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) @@ -1092,9 +1092,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, * with appropriate information. 
**/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct i40e_virtchnl_vf_resource *msg) + struct virtchnl_vf_resource *msg) { - struct i40e_virtchnl_vsi_resource *vsi_res; + struct virtchnl_vsi_resource *vsi_res; int i; vsi_res = &msg->vsi_res[0]; @@ -1104,7 +1104,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_L2; + VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.fcoe = 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == I40E_VSI_SRIOV) { @@ -1127,7 +1127,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, **/ i40e_status i40e_vf_reset(struct i40e_hw *hw) { - return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, + return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, 0, NULL, 0, NULL); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 227905b23690..c9836bba487d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -87,10 +87,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct i40e_virtchnl_vf_resource *msg); + struct virtchnl_vf_resource *msg); i40e_status i40e_vf_reset(struct i40e_hw *hw); i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 75d314b1a9bb..9d8c21b36332 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -264,25 +264,25 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_aq_link_speed link_speed; - enum i40e_virtchnl_ops current_op; + enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ (_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ + VIRTCHNL_VF_OFFLOAD_IWARP : \ 0) #define CLIENT_ENABLED(_a) ((_a)->cinst) /* RSS by the PF should be preferred over RSS via other methods. */ #define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) + VIRTCHNL_VF_OFFLOAD_RSS_PF) #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) + VIRTCHNL_VF_OFFLOAD_RSS_AQ) #define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \ - (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF))) + (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_VLAN) - struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ - struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ - struct i40e_virtchnl_version_info pf_version; + VIRTCHNL_VF_OFFLOAD_VLAN) + struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct virtchnl_version_info pf_version; #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ ((_a)->pf_version.minor == 1)) u16 msg_enable; @@ -348,7 +348,7 @@ void i40evf_set_hena(struct i40evf_adapter *adapter); void i40evf_set_rss_key(struct i40evf_adapter *adapter); void i40evf_set_rss_lut(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); int i40evf_config_rss(struct i40evf_adapter *adapter); int i40evf_lan_add_device(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c index ee737680a0e9..93cf5fd17d91 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c @@ -120,7 +120,7 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev) return -EAGAIN; err = i40e_aq_send_msg_to_pf(&adapter->hw, - I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, I40E_SUCCESS, NULL, 0, NULL); if (err) @@ -410,7 +410,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, if (adapter->aq_required) return -EAGAIN; - err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP, + err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, I40E_SUCCESS, msg, len, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", @@ -431,7 +431,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, struct i40e_client *client, struct i40e_qvlist_info *qvlist_info) { - struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info; + struct virtchnl_iwarp_qvlist_info *v_qvlist_info; struct i40evf_adapter *adapter = ldev->vf; struct i40e_qv_info *qv_info; i40e_status err; @@ -453,14 +453,14 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, return -EINVAL; } - v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info; - msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + - (sizeof(struct i40e_virtchnl_iwarp_qv_info) * + v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; + msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) + + (sizeof(struct virtchnl_iwarp_qv_info) * (v_qvlist_info->num_vectors - 1)); - adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); + adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); err = i40e_aq_send_msg_to_pf(&adapter->hw, - I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); if (err) { @@ -474,7 +474,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, for (i = 0; i < 5; i++) { msleep(100); if (!(adapter->client_pending & - BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { + BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { err = 0; break; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index ea110a730e16..5d7b613e0d62 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1131,7 +1131,7 @@ void i40evf_down(struct i40evf_adapter *adapter) if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && adapter->state != __I40EVF_RESETTING) { /* cancel any current operation */ - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + 
adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. The watchdog is still running * and it will take care of this. @@ -1311,7 +1311,7 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) struct i40e_hw *hw = &adapter->hw; int ret = 0; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", adapter->current_op); @@ -1410,7 +1410,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter) if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; else adapter->hena = I40E_DEFAULT_RSS_HENA; @@ -1588,8 +1588,8 @@ static void i40evf_watchdog_task(struct work_struct *work) if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { reg_val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((reg_val == I40E_VFR_VFACTIVE) || - (reg_val == I40E_VFR_COMPLETED)) { + if ((reg_val == VIRTCHNL_VFR_VFACTIVE) || + (reg_val == VIRTCHNL_VFR_COMPLETED)) { /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); adapter->state = __I40EVF_STARTUP; @@ -1605,7 +1605,7 @@ static void i40evf_watchdog_task(struct work_struct *work) return; } adapter->aq_required = 0; - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; goto watchdog_done; } @@ -1621,7 +1621,7 @@ static void i40evf_watchdog_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); schedule_work(&adapter->reset_task); adapter->aq_required = 0; - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; goto watchdog_done; } @@ -1854,7 +1854,7 @@ static void i40evf_reset_task(struct work_struct *work) reg_val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (reg_val == I40E_VFR_VFACTIVE) + if (reg_val == VIRTCHNL_VFR_VFACTIVE) break; } @@ -1888,7 +1888,7 @@ continue_reset: /* kill and reinit the admin queue */ i40evf_shutdown_adminq(hw); - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; err = i40evf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", @@ -1949,7 +1949,7 @@ static void i40evf_adminq_task(struct work_struct *work) container_of(work, struct i40evf_adapter, adminq_task); struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - struct i40e_virtchnl_msg *v_msg; + struct virtchnl_msg *v_msg; i40e_status ret; u32 val, oldval; u16 pending; @@ -1962,7 +1962,7 @@ static void i40evf_adminq_task(struct work_struct *work) if (!event.msg_buf) goto out; - v_msg = (struct i40e_virtchnl_msg *)&event.desc; + v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40evf_clean_arq_element(hw, &event, &pending); if (ret || !v_msg->v_opcode) @@ -2347,7 +2347,7 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev, struct i40evf_adapter *adapter = netdev_priv(netdev); features &= ~I40EVF_VLAN_FEATURES; - if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) + if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN) features |= I40EVF_VLAN_FEATURES; return features; } @@ -2384,8 
+2384,8 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) for (i = 0; i < 100; i++) { rstat = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat == I40E_VFR_VFACTIVE) || - (rstat == I40E_VFR_COMPLETED)) + if ((rstat == VIRTCHNL_VFR_VFACTIVE) || + (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; usleep_range(10, 20); } @@ -2401,7 +2401,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) **/ int i40evf_process_config(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res; + struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct i40e_vsi *vsi = &adapter->vsi; int i; @@ -2434,7 +2434,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) /* advertise to stack only if offloads for encapsulated packets is * supported */ - if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) { + if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | @@ -2445,7 +2445,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) 0; if (!(vfres->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; @@ -2472,7 +2472,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; - if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { adapter->rss_key_size = vfres->rss_key_size; adapter->rss_lut_size = vfres->rss_lut_size; } else { @@ -2558,8 +2558,8 @@ static void i40evf_init_task(struct work_struct *work) dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", adapter->pf_version.major, adapter->pf_version.minor, - I40E_VIRTCHNL_VERSION_MAJOR, - I40E_VIRTCHNL_VERSION_MINOR); + VIRTCHNL_VERSION_MAJOR, + VIRTCHNL_VERSION_MINOR); goto err; } err = i40evf_send_vf_config_msg(adapter); @@ -2573,9 +2573,9 @@ static void i40evf_init_task(struct work_struct *work) case __I40EVF_INIT_GET_RESOURCES: /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { - bufsz = sizeof(struct i40e_virtchnl_vf_resource) + + bufsz = sizeof(struct virtchnl_vf_resource) + (I40E_MAX_VF_VSI * - sizeof(struct i40e_virtchnl_vsi_resource)); + sizeof(struct virtchnl_vsi_resource)); adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); if (!adapter->vf_res) goto err; @@ -2606,7 +2606,7 @@ static void i40evf_init_task(struct work_struct *work) if (i40evf_process_config(adapter)) goto err_alloc; - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; @@ -2644,7 +2644,7 @@ static void i40evf_init_task(struct work_struct *work) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; err = i40evf_request_misc_irq(adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 91b21f26f8d4..90a17b0347b9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -42,7 +42,7 @@ * Send message to PF and print status if failure. 
**/ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, - enum i40e_virtchnl_ops op, u8 *msg, u16 len) + enum virtchnl_ops op, u8 *msg, u16 len) { struct i40e_hw *hw = &adapter->hw; i40e_status err; @@ -68,12 +68,12 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, **/ int i40evf_send_api_ver(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_version_info vvi; + struct virtchnl_version_info vvi; - vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; - vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; + vvi.major = VIRTCHNL_VERSION_MAJOR; + vvi.minor = VIRTCHNL_VERSION_MINOR; - return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, + return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); } @@ -88,10 +88,10 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter) **/ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_version_info *pf_vvi; + struct virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - enum i40e_virtchnl_ops op; + enum virtchnl_ops op; i40e_status err; event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; @@ -109,8 +109,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) if (err) goto out_alloc; op = - (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == I40E_VIRTCHNL_OP_VERSION) + (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == VIRTCHNL_OP_VERSION) break; } @@ -119,19 +119,19 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) if (err) goto out_alloc; - if (op != I40E_VIRTCHNL_OP_VERSION) { + if (op != VIRTCHNL_OP_VERSION) { dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", op); err = -EIO; goto out_alloc; } - pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; + pf_vvi = (struct virtchnl_version_info *)event.msg_buf; adapter->pf_version = *pf_vvi; - if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || - ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && - (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) + if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || + ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && + (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) err = -EIO; out_alloc: @@ -152,25 +152,25 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) { u32 caps; - caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN | - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | - I40E_VIRTCHNL_VF_OFFLOAD_ENCAP | - I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; - - adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_PF | + VIRTCHNL_VF_OFFLOAD_RSS_AQ | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN | + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | + VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | + VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + + adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) return i40evf_send_pf_msg(adapter, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_GET_VF_RESOURCES, (u8 *)&caps, sizeof(caps)); else return i40evf_send_pf_msg(adapter, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0); } @@ -188,12 +188,12 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - enum 
i40e_virtchnl_ops op; + enum virtchnl_ops op; i40e_status err; u16 len; - len = sizeof(struct i40e_virtchnl_vf_resource) + - I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); + len = sizeof(struct virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); event.buf_len = len; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { @@ -209,8 +209,8 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) if (err) goto out_alloc; op = - (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) + (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == VIRTCHNL_OP_GET_VF_RESOURCES) break; } @@ -232,20 +232,20 @@ out: **/ void i40evf_configure_queues(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_vsi_queue_config_info *vqci; - struct i40e_virtchnl_queue_pair_info *vqpi; + struct virtchnl_vsi_queue_config_info *vqci; + struct virtchnl_queue_pair_info *vqpi; int pairs = adapter->num_active_queues; int i, len, max_frame = I40E_MAX_RXBUFFER; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", adapter->current_op); return; } - adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; - len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + - (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); + adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; + len = sizeof(struct virtchnl_vsi_queue_config_info) + + (sizeof(struct virtchnl_queue_pair_info) * pairs); vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; @@ -278,7 +278,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) } adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len); kfree(vqci); } @@ -291,20 +291,20 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) **/ void i40evf_enable_queues(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_queue_select vqs; + struct virtchnl_queue_select vqs; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", adapter->current_op); return; } - adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; + adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } @@ -316,20 +316,20 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) **/ void i40evf_disable_queues(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_queue_select vqs; + struct virtchnl_queue_select vqs; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", adapter->current_op); return; } - adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; + adapter->current_op = 
VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } @@ -342,23 +342,23 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) **/ void i40evf_map_queues(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_irq_map_info *vimi; + struct virtchnl_irq_map_info *vimi; int v_idx, q_vectors, len; struct i40e_q_vector *q_vector; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", adapter->current_op); return; } - adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; + adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP; q_vectors = adapter->num_msix_vectors - NONQ_VECS; - len = sizeof(struct i40e_virtchnl_irq_map_info) + + len = sizeof(struct virtchnl_irq_map_info) + (adapter->num_msix_vectors * - sizeof(struct i40e_virtchnl_vector_map)); + sizeof(struct virtchnl_vector_map)); vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; @@ -379,7 +379,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vimi->vecmap[v_idx].rxq_map = 0; adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vimi, len); kfree(vimi); } @@ -394,12 +394,12 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) **/ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_ether_addr_list *veal; + struct virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; struct i40evf_mac_filter *f; bool more = false; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", adapter->current_op); @@ -413,17 +413,17 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; return; } - adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; + adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; - len = sizeof(struct i40e_virtchnl_ether_addr_list) + - (count * sizeof(struct i40e_virtchnl_ether_addr)); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct i40e_virtchnl_ether_addr_list)) / - sizeof(struct i40e_virtchnl_ether_addr); - len = sizeof(struct i40e_virtchnl_ether_addr_list) + - (count * sizeof(struct i40e_virtchnl_ether_addr)); + sizeof(struct virtchnl_ether_addr_list)) / + sizeof(struct virtchnl_ether_addr); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); more = true; } @@ -444,7 +444,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); } @@ -459,12 +459,12 @@ void 
i40evf_add_ether_addrs(struct i40evf_adapter *adapter) **/ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_ether_addr_list *veal; + struct virtchnl_ether_addr_list *veal; struct i40evf_mac_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", adapter->current_op); @@ -478,17 +478,17 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; return; } - adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; - len = sizeof(struct i40e_virtchnl_ether_addr_list) + - (count * sizeof(struct i40e_virtchnl_ether_addr)); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct i40e_virtchnl_ether_addr_list)) / - sizeof(struct i40e_virtchnl_ether_addr); - len = sizeof(struct i40e_virtchnl_ether_addr_list) + - (count * sizeof(struct i40e_virtchnl_ether_addr)); + sizeof(struct virtchnl_ether_addr_list)) / + sizeof(struct virtchnl_ether_addr); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); more = true; } veal = kzalloc(len, GFP_KERNEL); @@ -509,7 +509,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); } @@ -524,12 +524,12 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) **/ void i40evf_add_vlans(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_vlan_filter_list *vvfl; + struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct i40evf_vlan_filter *f; bool more = false; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", adapter->current_op); @@ -544,16 +544,16 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; return; } - adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; - len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct i40e_virtchnl_vlan_filter_list)) / + sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); - len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); more = true; } @@ -574,7 +574,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -588,12 +588,12 
@@ void i40evf_add_vlans(struct i40evf_adapter *adapter) **/ void i40evf_del_vlans(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_vlan_filter_list *vvfl; + struct virtchnl_vlan_filter_list *vvfl; struct i40evf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", adapter->current_op); @@ -608,16 +608,16 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; return; } - adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; - len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct i40e_virtchnl_vlan_filter_list)) / + sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); - len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); more = true; } @@ -639,7 +639,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -652,10 +652,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) **/ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) { - struct i40e_virtchnl_promisc_info vpi; + struct virtchnl_promisc_info vpi; int promisc_all; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", adapter->current_op); @@ -682,10 +682,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); } - adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi, sizeof(vpi)); } @@ -697,19 +697,19 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) **/ void i40evf_request_stats(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_queue_select vqs; + struct virtchnl_queue_select vqs; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* no error message, this isn't crucial */ return; } - adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS; + adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ - if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS, + if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, sizeof(vqs))) /* if the request failed, don't lock out others */ - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** @@ 
-720,15 +720,15 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) **/ void i40evf_get_hena(struct i40evf_adapter *adapter) { - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", adapter->current_op); return; } - adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS; + adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); } @@ -740,18 +740,18 @@ void i40evf_get_hena(struct i40evf_adapter *adapter) **/ void i40evf_set_hena(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_rss_hena vrh; + struct virtchnl_rss_hena vrh; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", adapter->current_op); return; } vrh.hena = adapter->hena; - adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA; + adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, sizeof(vrh)); } @@ -763,16 +763,16 @@ void i40evf_set_hena(struct i40evf_adapter *adapter) **/ void i40evf_set_rss_key(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_rss_key *vrk; + struct virtchnl_rss_key *vrk; int len; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", adapter->current_op); return; } - len = sizeof(struct i40e_virtchnl_rss_key) + + len = sizeof(struct virtchnl_rss_key) + (adapter->rss_key_size * sizeof(u8)) - 1; vrk = kzalloc(len, GFP_KERNEL); if (!vrk) @@ -781,9 +781,9 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) vrk->key_len = adapter->rss_key_size; memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); - adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY; + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); kfree(vrk); } @@ -796,16 +796,16 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) **/ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) { - struct i40e_virtchnl_rss_lut *vrl; + struct virtchnl_rss_lut *vrl; int len; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", adapter->current_op); return; } - len = sizeof(struct i40e_virtchnl_rss_lut) + + len = sizeof(struct virtchnl_rss_lut) + (adapter->rss_lut_size * sizeof(u8)) - 1; vrl = kzalloc(len, GFP_KERNEL); if (!vrl) @@ -813,9 +813,9 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) vrl->vsi_id = adapter->vsi.id; vrl->lut_entries = adapter->rss_lut_size; memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); - adapter->current_op 
= I40E_VIRTCHNL_OP_CONFIG_RSS_LUT; + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); kfree(vrl); } @@ -871,8 +871,8 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter) void i40evf_request_reset(struct i40evf_adapter *adapter) { /* Don't check CURRENT_OP - this is always higher priority */ - i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** @@ -888,17 +888,17 @@ void i40evf_request_reset(struct i40evf_adapter *adapter) * This function handles the reply messages. **/ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; - if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { - struct i40e_virtchnl_pf_event *vpe = - (struct i40e_virtchnl_pf_event *)msg; + if (v_opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)msg; switch (vpe->event) { - case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + case VIRTCHNL_EVENT_LINK_CHANGE: adapter->link_speed = vpe->event_data.link_event.link_speed; if (adapter->link_up != @@ -915,7 +915,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, i40evf_print_link_message(adapter); } break; - case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "PF reset warning received\n"); if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { adapter->flags |= I40EVF_FLAG_RESET_PENDING; @@ -932,19 +932,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } if (v_retval) { switch (v_opcode) { - case I40E_VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_ADD_VLAN: dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", i40evf_stat_str(&adapter->hw, v_retval)); break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", i40evf_stat_str(&adapter->hw, v_retval)); break; - case I40E_VIRTCHNL_OP_DEL_VLAN: + case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", i40evf_stat_str(&adapter->hw, v_retval)); break; - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + case VIRTCHNL_OP_DEL_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", i40evf_stat_str(&adapter->hw, v_retval)); break; @@ -956,7 +956,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } } switch (v_opcode) { - case I40E_VIRTCHNL_OP_GET_STATS: { + case VIRTCHNL_OP_GET_STATS: { struct i40e_eth_stats *stats = (struct i40e_eth_stats *)msg; netdev->stats.rx_packets = stats->rx_unicast + @@ -973,10 +973,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->current_stats = *stats; } break; - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: { - u16 len = sizeof(struct i40e_virtchnl_vf_resource) + + case VIRTCHNL_OP_GET_VF_RESOURCES: { + u16 len = sizeof(struct virtchnl_vf_resource) + I40E_MAX_VF_VSI * - sizeof(struct i40e_virtchnl_vsi_resource); + sizeof(struct virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); i40e_vf_parse_hw_config(&adapter->hw, 
adapter->vf_res); /* restore current mac address */ @@ -984,18 +984,18 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, i40evf_process_config(adapter); } break; - case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ i40evf_irq_enable(adapter, true); break; - case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); if (adapter->state == __I40EVF_DOWN_PENDING) adapter->state = __I40EVF_DOWN; break; - case I40E_VIRTCHNL_OP_VERSION: - case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: /* Don't display an error if we get these out of sequence. * If the firmware needed to get kicked, we'll get these and * it's no problem. @@ -1003,7 +1003,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; - case I40E_VIRTCHNL_OP_IWARP: + case VIRTCHNL_OP_IWARP: /* Gobble zero-length replies from the PF. They indicate that * a previous message was received OK, and the client doesn't * care about that. @@ -1013,13 +1013,13 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: adapter->client_pending &= - ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); + ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); break; - case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { - struct i40e_virtchnl_rss_hena *vrh = - (struct i40e_virtchnl_rss_hena *)msg; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { + struct virtchnl_rss_hena *vrh = + (struct virtchnl_rss_hena *)msg; if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else @@ -1033,5 +1033,5 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->current_op, v_opcode); break; } /* switch v_opcode */ - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; } diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 7d6da3ac24f4..a8b616121960 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -24,8 +24,8 @@ * ******************************************************************************/ -#ifndef _I40E_VIRTCHNL_H_ -#define _I40E_VIRTCHNL_H_ +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ /* Description: * This header file describes the VF-PF communication protocol used @@ -56,36 +56,36 @@ /* Opcodes for VF-PF communication. These are placed in the v_opcode field * of the virtchnl_msg structure. */ -enum i40e_virtchnl_ops { +enum virtchnl_ops { /* The PF sends status change events to VFs using - * the I40E_VIRTCHNL_OP_EVENT opcode. + * the VIRTCHNL_OP_EVENT opcode. * VFs send requests to the PF using the other ops. 
*/ - I40E_VIRTCHNL_OP_UNKNOWN = 0, - I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF = 2, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, - I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, - I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, - I40E_VIRTCHNL_OP_ADD_VLAN = 12, - I40E_VIRTCHNL_OP_DEL_VLAN = 13, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, - I40E_VIRTCHNL_OP_GET_STATS = 15, - I40E_VIRTCHNL_OP_RSVD = 16, - I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ - I40E_VIRTCHNL_OP_IWARP = 20, - I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, - I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, - I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, - I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, - I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, - I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + VIRTCHNL_OP_IWARP = 20, + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, }; @@ -93,16 +93,16 @@ enum i40e_virtchnl_ops { * descriptor. All other data is passed in external buffers. */ -struct i40e_virtchnl_msg { +struct virtchnl_msg { u8 pad[8]; /* AQ flags/opcode/len/retval fields */ - enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ i40e_status v_retval; /* ditto for desc->retval */ u32 vfid; /* used by PF when sending to VF */ }; /* Message descriptions and data structures.*/ -/* I40E_VIRTCHNL_OP_VERSION +/* VIRTCHNL_OP_VERSION * VF posts its version number to the PF. PF responds with its version number * in the same format, along with a return code. * Reply from PF has its major/minor versions also in param0 and param1. @@ -114,16 +114,16 @@ struct i40e_virtchnl_msg { * changes in the API. The PF must always respond to this message without * error regardless of version mismatch. */ -#define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 -struct i40e_virtchnl_version_info { +struct virtchnl_version_info { u32 major; u32 minor; }; -/* I40E_VIRTCHNL_OP_RESET_VF +/* VIRTCHNL_OP_RESET_VF * VF sends this request to PF with no parameters * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register * until reset completion is indicated. The admin queue must be reinitialized @@ -135,15 +135,15 @@ struct i40e_virtchnl_version_info { * are cleared. 
*/ -/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES +/* VIRTCHNL_OP_GET_VF_RESOURCES * Version 1.0 VF sends this request to PF with no parameters * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing - * i40e_virtchnl_vf_resource and one or more - * i40e_virtchnl_vsi_resource structures. + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. */ -struct i40e_virtchnl_vsi_resource { +struct virtchnl_vsi_resource { u16 vsi_id; u16 num_queue_pairs; enum i40e_vsi_type vsi_type; @@ -151,23 +151,24 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 -#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 - -#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \ - I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \ - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) - -struct i40e_virtchnl_vf_resource { +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 + +#define I40E_VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { u16 num_vsis; u16 num_queue_pairs; u16 max_vectors; @@ -177,17 +178,17 @@ struct i40e_virtchnl_vf_resource { u32 rss_key_size; u32 rss_lut_size; - struct i40e_virtchnl_vsi_resource vsi_res[1]; + struct virtchnl_vsi_resource vsi_res[1]; }; -/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE +/* VIRTCHNL_OP_CONFIG_TX_QUEUE * VF sends this message to set up parameters for one TX queue. - * External data buffer contains one instance of i40e_virtchnl_txq_info. + * External data buffer contains one instance of virtchnl_txq_info. * PF configures requested queue and returns a status code. */ /* Tx queue config info */ -struct i40e_virtchnl_txq_info { +struct virtchnl_txq_info { u16 vsi_id; u16 queue_id; u16 ring_len; /* number of descriptors, multiple of 8 */ @@ -196,14 +197,14 @@ struct i40e_virtchnl_txq_info { u64 dma_headwb_addr; }; -/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE +/* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. - * External data buffer contains one instance of i40e_virtchnl_rxq_info. + * External data buffer contains one instance of virtchnl_rxq_info. * PF configures requested queue and returns a status code. 
*/ /* Rx queue config info */ -struct i40e_virtchnl_rxq_info { +struct virtchnl_rxq_info { u16 vsi_id; u16 queue_id; u32 ring_len; /* number of descriptors, multiple of 32 */ @@ -215,33 +216,33 @@ struct i40e_virtchnl_rxq_info { enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; }; -/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES * VF sends this message to set parameters for all active TX and RX queues * associated with the specified VSI. * PF configures queues and returns status. * If the number of queues specified is greater than the number of queues * associated with the VSI, an error is returned and no queues are configured. */ -struct i40e_virtchnl_queue_pair_info { +struct virtchnl_queue_pair_info { /* NOTE: vsi_id and queue_id should be identical for both queues. */ - struct i40e_virtchnl_txq_info txq; - struct i40e_virtchnl_rxq_info rxq; + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; }; -struct i40e_virtchnl_vsi_queue_config_info { +struct virtchnl_vsi_queue_config_info { u16 vsi_id; u16 num_queue_pairs; - struct i40e_virtchnl_queue_pair_info qpair[1]; + struct virtchnl_queue_pair_info qpair[1]; }; -/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP +/* VIRTCHNL_OP_CONFIG_IRQ_MAP * VF uses this message to map vectors to queues. * The rxq_map and txq_map fields are bitmaps used to indicate which queues * are to be associated with the specified vector. * The "other" causes are always mapped to vector 0. * PF configures interrupt mapping and returns status. */ -struct i40e_virtchnl_vector_map { +struct virtchnl_vector_map { u16 vsi_id; u16 vector_id; u16 rxq_map; @@ -250,75 +251,75 @@ struct i40e_virtchnl_vector_map { u16 txitr_idx; }; -struct i40e_virtchnl_irq_map_info { +struct virtchnl_irq_map_info { u16 num_vectors; - struct i40e_virtchnl_vector_map vecmap[1]; + struct virtchnl_vector_map vecmap[1]; }; -/* I40E_VIRTCHNL_OP_ENABLE_QUEUES - * I40E_VIRTCHNL_OP_DISABLE_QUEUES +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES * VF sends these message to enable or disable TX/RX queue pairs. * The queues fields are bitmaps indicating which queues to act upon. * (Currently, we only support 16 queues per VF, but we make the field * u32 to allow for expansion.) * PF performs requested action and returns status. */ -struct i40e_virtchnl_queue_select { +struct virtchnl_queue_select { u16 vsi_id; u16 pad; u32 rx_queues; u32 tx_queues; }; -/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS +/* VIRTCHNL_OP_ADD_ETH_ADDR * VF sends this message in order to add one or more unicast or multicast * address filters for the specified VSI. * PF adds the filters and returns status. */ -/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS +/* VIRTCHNL_OP_DEL_ETH_ADDR * VF sends this message in order to remove one or more unicast or multicast * filters for the specified VSI. * PF removes the filters and returns status. */ -struct i40e_virtchnl_ether_addr { +struct virtchnl_ether_addr { u8 addr[ETH_ALEN]; u8 pad[2]; }; -struct i40e_virtchnl_ether_addr_list { +struct virtchnl_ether_addr_list { u16 vsi_id; u16 num_elements; - struct i40e_virtchnl_ether_addr list[1]; + struct virtchnl_ether_addr list[1]; }; -/* I40E_VIRTCHNL_OP_ADD_VLAN +/* VIRTCHNL_OP_ADD_VLAN * VF sends this message to add one or more VLAN tag filters for receives. * PF adds the filters and returns status. * If a port VLAN is configured by the PF, this operation will return an * error to the VF. */ -/* I40E_VIRTCHNL_OP_DEL_VLAN +/* VIRTCHNL_OP_DEL_VLAN * VF sends this message to remove one or more VLAN tag filters for receives. 
* PF removes the filters and returns status. * If a port VLAN is configured by the PF, this operation will return an * error to the VF. */ -struct i40e_virtchnl_vlan_filter_list { +struct virtchnl_vlan_filter_list { u16 vsi_id; u16 num_elements; u16 vlan_id[1]; }; -/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE * VF sends VSI id and flags. * PF returns status code in retval. * Note: we assume that broadcast accept mode is always enabled. */ -struct i40e_virtchnl_promisc_info { +struct virtchnl_promisc_info { u16 vsi_id; u16 flags; }; @@ -326,63 +327,63 @@ struct i40e_virtchnl_promisc_info { #define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 #define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 -/* I40E_VIRTCHNL_OP_GET_STATS +/* VIRTCHNL_OP_GET_STATS * VF sends this message to request stats for the selected VSI. VF uses - * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id + * the virtchnl_queue_select struct to specify the VSI. The queue_id * field is ignored by the PF. * * PF replies with struct i40e_eth_stats in an external buffer. */ -/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY - * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT * VF sends these messages to configure RSS. Only supported if both PF - * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during * configuration negotiation. If this is the case, then the RSS fields in * the VF resource struct are valid. * Both the key and LUT are initialized to 0 by the PF, meaning that * RSS is effectively disabled until set up by the VF. */ -struct i40e_virtchnl_rss_key { +struct virtchnl_rss_key { u16 vsi_id; u16 key_len; u8 key[1]; /* RSS hash key, packed bytes */ }; -struct i40e_virtchnl_rss_lut { +struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table*/ }; -/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS - * I40E_VIRTCHNL_OP_SET_RSS_HENA +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA * VF sends these messages to get and set the hash filter enable bits for RSS. * By default, the PF sets these to all possible traffic types that the * hardware supports. The VF can query this value if it wants to change the * traffic types that are hashed by the hardware. * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h */ -struct i40e_virtchnl_rss_hena { +struct virtchnl_rss_hena { u64 hena; }; -/* I40E_VIRTCHNL_OP_EVENT +/* VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other * messages in response to this one. 
*/ -enum i40e_virtchnl_event_codes { - I40E_VIRTCHNL_EVENT_UNKNOWN = 0, - I40E_VIRTCHNL_EVENT_LINK_CHANGE, - I40E_VIRTCHNL_EVENT_RESET_IMPENDING, - I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, }; #define I40E_PF_EVENT_SEVERITY_INFO 0 #define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 -struct i40e_virtchnl_pf_event { - enum i40e_virtchnl_event_codes event; +struct virtchnl_pf_event { + enum virtchnl_event_codes event; union { struct { enum i40e_aq_link_speed link_speed; @@ -393,7 +394,7 @@ struct i40e_virtchnl_pf_event { int severity; }; -/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP * VF uses this message to request PF to map IWARP vectors to IWARP queues. * The request for this originates from the VF IWARP driver through * a client interface between VF LAN and VF IWARP driver. @@ -412,16 +413,16 @@ struct i40e_virtchnl_pf_event { #define I40E_QUEUE_TYPE_PE_AEQ 0x80 #define I40E_QUEUE_INVALID_IDX 0xFFFF -struct i40e_virtchnl_iwarp_qv_info { +struct virtchnl_iwarp_qv_info { u32 v_idx; /* msix_vector */ u16 ceq_idx; u16 aeq_idx; u8 itr_idx; }; -struct i40e_virtchnl_iwarp_qvlist_info { +struct virtchnl_iwarp_qvlist_info { u32 num_vectors; - struct i40e_virtchnl_iwarp_qv_info qv_info[1]; + struct virtchnl_iwarp_qv_info qv_info[1]; }; /* VF reset states - these are written into the RSTAT register: @@ -436,11 +437,11 @@ struct i40e_virtchnl_iwarp_qvlist_info { * is in a reset state, it will return DEADBEEF, which, when masked * will result in 3. */ -enum i40e_vfr_states { - I40E_VFR_INPROGRESS = 0, - I40E_VFR_COMPLETED, - I40E_VFR_VFACTIVE, - I40E_VFR_UNKNOWN, +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, + VIRTCHNL_VFR_UNKNOWN, }; -#endif /* _I40E_VIRTCHNL_H_ */ +#endif /* _VIRTCHNL_H_ */ -- cgit v1.2.3 From eedcfef85b15ae02c488625556702594a618c616 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 May 2017 11:23:13 -0700 Subject: virtchnl: convert to new macros As part of the conversion, change the arguments to VF_IS_V1[01] macros and move them to virtchnl.h Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 3 --- include/linux/avf/virtchnl.h | 3 +++ 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index d7fcc4ffa393..923026a255c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1481,7 +1481,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) vf->vf_ver = *(struct virtchnl_version_info *)msg; /* VFs running the 1.0 API expect to get 1.0 back or they will cry. 
 */
-	if (VF_IS_V10(vf))
+	if (VF_IS_V10(&vf->vf_ver))
 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
 				      I40E_SUCCESS, (u8 *)&info,
@@ -1521,7 +1521,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 		len = 0;
 		goto err;
 	}
-	if (VF_IS_V11(vf))
+	if (VF_IS_V11(&vf->vf_ver))
 		vf->driver_caps = *(u32 *)msg;
 	else
 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
@@ -2557,7 +2557,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
 	case VIRTCHNL_OP_RESET_VF:
 		break;
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
-		if (VF_IS_V11(vf))
+		if (VF_IS_V11(&vf->vf_ver))
 			valid_len = sizeof(u32);
 		break;
 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index b57ffffce141..1f4b0c504368 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -40,9 +40,6 @@
 #define I40E_VLAN_MASK 0xFFF
 #define I40E_PRIORITY_MASK 0x7000
 
-#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
-#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
-
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
 	I40E_QUEUE_CTRL_UNKNOWN = 0,
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index a8b616121960..8ffa670c2ffd 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -123,6 +123,9 @@ struct virtchnl_version_info {
 	u32 minor;
 };
 
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
 /* VIRTCHNL_OP_RESET_VF
  * VF sends this request to PF with no parameters
  * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
-- cgit v1.2.3

From 764430ce6f8c38d7ed3b6d2cfe9450b9d3c78809 Mon Sep 17 00:00:00 2001
From: Jesse Brandeburg
Date: Thu, 11 May 2017 11:23:14 -0700
Subject: i40e/virtchnl: refactor code for validate checks

This change updates the arguments passed to the validate function,
fixes the caller, and uses the new return values added to virtchnl.h.
One other minor tweak: remove a duplicate set to zero of valid_len.
This is in preparation for moving the function to virtchnl.h.
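A minimal sketch of the shape the validation takes after this change, for
illustration only: the opcode, macro, and status-code names are the ones this
series defines in virtchnl.h, while the function name and the trimmed-down
opcode set are placeholders for the full table in the diff below.

	static int
	sketch_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			       u8 *msg, u16 msglen)
	{
		int valid_len = 0;

		/* each opcode maps to one expected message size */
		switch (v_opcode) {
		case VIRTCHNL_OP_VERSION:
			valid_len = sizeof(struct virtchnl_version_info);
			break;
		case VIRTCHNL_OP_GET_VF_RESOURCES:
			/* v1.1 VFs also send a u32 capability bitmap */
			if (VF_IS_V11(ver))
				valid_len = sizeof(u32);
			break;
		default:
			/* unknown opcodes are rejected outright */
			return VIRTCHNL_ERR_PARAM;
		}
		/* a mismatched length means a malformed message */
		if (valid_len != msglen)
			return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
		return 0;
	}

Returning generic virtchnl status codes rather than driver-private errnos
lets the caller translate them once, and is what allows the function to move
out of the i40e driver later.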
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 36 ++++++++++++---------- include/linux/avf/virtchnl.h | 17 ++++++++++ 2 files changed, 37 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 923026a255c0..61f948c587ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2536,15 +2536,16 @@ err: /** * i40e_vc_validate_vf_msg - * @vf: pointer to the VF info + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message * @msg: pointer to the msg buffer * @msglen: msg length - * @msghndl: msg handle * - * validate msg + * validate msg format against struct for each opcode **/ -static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) +static int +i40e_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) { bool err_msg_format = false; int valid_len = 0; @@ -2557,7 +2558,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, case VIRTCHNL_OP_RESET_VF: break; case VIRTCHNL_OP_GET_VF_RESOURCES: - if (VF_IS_V11(&vf->vf_ver)) + if (VF_IS_V11(ver)) valid_len = sizeof(u32); break; case VIRTCHNL_OP_CONFIG_TX_QUEUE: @@ -2633,7 +2634,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, err_msg_format = true; break; case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - valid_len = 0; break; case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); @@ -2673,15 +2673,13 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: default: - return -EPERM; + return VIRTCHNL_ERR_PARAM; } /* few more checks */ - if ((valid_len != msglen) || (err_msg_format)) { - i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); - return -EINVAL; - } else { - return 0; - } + if ((valid_len != msglen) || (err_msg_format)) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; } /** @@ -2713,7 +2711,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, return I40E_ERR_PARAM; /* perform basic checks on the msg */ - ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); + ret = i40e_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); /* perform additional checks specific to this driver */ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { @@ -2729,9 +2727,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, } if (ret) { + i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); - return ret; + switch (ret) { + case VIRTCHNL_ERR_PARAM: + return -EPERM; + default: + return -EINVAL; + } } switch (v_opcode) { diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 8ffa670c2ffd..f1cc1f02036e 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -53,6 +53,23 @@ * its queues, optionally add MAC and VLAN filters, and process traffic. 
*/ +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_NOT_SUPPORTED = -64, +}; + +/* END GENERIC DEFINES */ + /* Opcodes for VF-PF communication. These are placed in the v_opcode field * of the virtchnl_msg structure. */ -- cgit v1.2.3 From f0adc6e831baaef16577ea2af5eb3e91fd4efef4 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 May 2017 11:23:15 -0700 Subject: i40evf/virtchnl: whitespace cleanups This patch fixes up a bunch of whitespace issues introduced by the previous automated change of name from i40e to virtchnl. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 12 ++++-------- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 3 +-- include/linux/avf/virtchnl.h | 6 +++--- 3 files changed, 8 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 61f948c587ad..422cccf0de86 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1485,8 +1485,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, (u8 *)&info, - sizeof(struct - virtchnl_version_info)); + sizeof(struct virtchnl_version_info)); } /** @@ -1544,11 +1543,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) } else { if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) - vfres->vf_offload_flags |= - VIRTCHNL_VF_OFFLOAD_RSS_AQ; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else - vfres->vf_offload_flags |= - VIRTCHNL_VF_OFFLOAD_RSS_REG; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { @@ -2530,8 +2527,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) /* send the response to the VF */ err: - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, - aq_ret); + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 90a17b0347b9..d9f040900373 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -1018,8 +1018,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { - struct virtchnl_rss_hena *vrh = - (struct virtchnl_rss_hena *)msg; + struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index f1cc1f02036e..73970bd439fe 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -175,10 +175,10 @@ struct virtchnl_vsi_resource { #define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 #define VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 
 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ	0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR	0x00000020
 #define VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
 #define VIRTCHNL_VF_OFFLOAD_RSS_PF	0X00080000
 #define VIRTCHNL_VF_OFFLOAD_ENCAP	0X00100000
-- cgit v1.2.3

From 233c1955835bd8649003be9bb3d8e79788b08be1 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Sun, 14 May 2017 15:49:57 +0300
Subject: RDMA/netlink: Reduce exposure of RDMA netlink functions

RDMA netlink is part of ib_core, hence ibnl_chk_listeners(), ibnl_init()
and ibnl_cleanup() don't need to be published in a public header file.

Let's remove EXPORT_SYMBOL from ibnl_chk_listeners() and move all these
functions to a private header file.

CC: Yuval Shaia
Signed-off-by: Leon Romanovsky
Reviewed-by: Yuval Shaia
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/core_priv.h | 10 ++++++++++
 drivers/infiniband/core/netlink.c   |  2 +-
 include/rdma/rdma_netlink.h         | 10 ----------
 3 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cb7d372e4bdf..d92ab4eaa8f3 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative for no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
 			      struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b784055423c8..94931c474d41 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include "core_priv.h"
 
 struct ibnl_client {
 	struct list_head list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
 		return -1;
 	return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
 		    const struct ibnl_client_cbs cb_table[])
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 585266144329..348c102cb5f6 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
 	struct module *module;
 };
 
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
 /**
  * Add a a client to the list of IB netlink exporters.
  * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 		   unsigned int group, gfp_t flags);
 
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */ -int ibnl_chk_listeners(unsigned int group); - #endif /* _RDMA_NETLINK_H */ -- cgit v1.2.3 From d3957b86a40612826ef935f474b31359d66cbdca Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Sun, 21 May 2017 19:09:54 +0300 Subject: RDMA/SA: Fix kernel panic in CMA request handler flow Commit 9fdca4da4d8c (IB/SA: Split struct sa_path_rec based on IB and ROCE specific fields) moved the service_id to be a specific attribute of the IB and OPA SA Path Records, and thus it wasn't assigned for RoCE. This caused the following kernel panic in the CMA request handler flow: [ 27.074594] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 [ 27.074731] IP: __radix_tree_lookup+0x1d/0xe0 ... [ 27.075356] Workqueue: ib_cm cm_work_handler [ib_cm] [ 27.075401] task: ffff88022e3b8000 task.stack: ffffc90001298000 [ 27.075449] RIP: 0010:__radix_tree_lookup+0x1d/0xe0 ... [ 27.075979] Call Trace: [ 27.076015] radix_tree_lookup+0xd/0x10 [ 27.076055] cma_ps_find+0x59/0x70 [rdma_cm] [ 27.076097] cma_id_from_event+0xd2/0x470 [rdma_cm] [ 27.076144] ? ib_init_ah_from_path+0x39a/0x590 [ib_core] [ 27.076193] cma_req_handler+0x25/0x480 [rdma_cm] [ 27.076237] cm_process_work+0x25/0x120 [ib_cm] [ 27.076280] ? cm_get_bth_pkey.isra.62+0x3c/0xa0 [ib_cm] [ 27.076350] cm_req_handler+0xb03/0xd40 [ib_cm] [ 27.076430] ? sched_clock_cpu+0x11/0xb0 [ 27.076478] cm_work_handler+0x194/0x1588 [ib_cm] [ 27.076525] process_one_work+0x160/0x410 [ 27.076565] worker_thread+0x137/0x4a0 [ 27.076614] kthread+0x112/0x150 [ 27.076684] ? max_active_store+0x60/0x60 [ 27.077642] ? kthread_park+0x90/0x90 [ 27.078530] ret_from_fork+0x2c/0x40 This patch moves it back to the common SA Path Record structure and removes the redundant setter and getter. Tested on Connect-IB and Connect-X4 in InfiniBand and RoCE respectively.
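For illustration, a minimal standalone sketch of the layout problem (simplified, invented names, not the actual kernel structures):

/* Before: service_id exists only in the IB and OPA variants, so a
 * RoCE record never has it written, and generic code that reads it
 * through one of those variants sees whatever the union happens to
 * contain at that offset.
 */
struct path_rec_old {
	int rec_type;				/* IB, OPA or RoCE */
	union {
		struct { unsigned long long service_id; } ib;
		struct { unsigned long long service_id; } opa;
		struct { unsigned char route_resolved; } roce;
	};
};

/* After: the field is hoisted into the common part of the record, so
 * it is valid for every rec_type and the per-type setter/getter
 * helpers become unnecessary.
 */
struct path_rec_new {
	unsigned long long service_id;
	int rec_type;
	/* type-specific fields remain in a union as before */
};

In the old layout a RoCE record leaves the service_id slot unassigned, which matches the trace above: the lookup ends up keyed on an uninitialized value.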
Fixes: 9fdca4da4d8c (IB/SA: Split struct sa_path_rec based on IB and ROCE specific fields) Signed-off-by: Majd Dibbiny Reviewed-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/cm.c | 4 ++-- drivers/infiniband/core/cma.c | 13 ++++++------- drivers/infiniband/core/sa_query.c | 6 +++--- drivers/infiniband/ulp/srp/ib_srp.c | 2 +- include/rdma/ib_sa.h | 25 +++---------------------- 5 files changed, 15 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1844770f3ae8..2b4d613a3474 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, primary_path->packet_life_time = cm_req_get_primary_local_ack_timeout(req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); - sa_path_set_service_id(primary_path, req_msg->service_id); + primary_path->service_id = req_msg->service_id; if (req_msg->alt_local_lid) { alt_path->dgid = req_msg->alt_local_gid; @@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, alt_path->packet_life_time = cm_req_get_alt_local_ack_timeout(req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); - sa_path_set_service_id(alt_path, req_msg->service_id); + alt_path->service_id = req_msg->service_id; } } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 91b7a2fe5a55..31bb82d8ecd7 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr, ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->sgid, 16); - ib->sib_sid = sa_path_get_service_id(path); + ib->sib_sid = path->service_id; ib->sib_scope_id = 0; } else { ib->sib_pkey = listen_ib->sib_pkey; @@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event, memcpy(&req->local_gid, &req_param->primary_path->sgid, sizeof(req->local_gid)); req->has_gid = true; - req->service_id = - sa_path_get_service_id(req_param->primary_path); + req->service_id = req_param->primary_path->service_id; req->pkey = be16_to_cpu(req_param->primary_path->pkey); if (req->pkey != req_param->bth_pkey) pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" @@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, struct rdma_route *rt; const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; - const __be64 service_id = sa_path_get_service_id(path); + const __be64 service_id = + ib_event->param.req_rcvd.primary_path->service_id; int ret; id = rdma_create_id(listen_id->route.addr.dev_addr.net, @@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); path_rec.numb_path = 1; path_rec.reversible = 1; - sa_path_set_service_id(&path_rec, - rdma_get_service_id(&id_priv->id, - cma_dst_addr(id_priv))); + path_rec.service_id = rdma_get_service_id(&id_priv->id, + cma_dst_addr(id_priv)); comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index e335b09c022e..fb7aec4047c8 ---
a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -194,7 +194,7 @@ static u32 tid; .field_name = "sa_path_rec:" #field static const struct ib_field path_rec_table[] = { - { PATH_REC_FIELD(ib.service_id), + { PATH_REC_FIELD(service_id), .offset_words = 0, .offset_bits = 0, .size_bits = 64 }, @@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = { .field_name = "sa_path_rec:" #field static const struct ib_field opa_path_rec_table[] = { - { OPA_PATH_REC_FIELD(opa.service_id), + { OPA_PATH_REC_FIELD(service_id), .offset_words = 0, .offset_bits = 0, .size_bits = 64 }, @@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, /* Now build the attributes */ if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { - val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); + val64 = be64_to_cpu(sa_rec->service_id); nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, sizeof(val64), &val64); } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 4306285fb155..2354c742caa1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch) ch->path.sgid = target->sgid; ch->path.dgid = target->orig_dgid; ch->path.pkey = target->pkey; - sa_path_set_service_id(&ch->path, target->service_id); + ch->path.service_id = target->service_id; return 0; } diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index f5f70e345318..355b81f4242d 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -158,7 +158,6 @@ enum sa_path_rec_type { }; struct sa_path_rec_ib { - __be64 service_id; __be16 dlid; __be16 slid; u8 raw_traffic; @@ -174,7 +173,6 @@ struct sa_path_rec_roce { }; struct sa_path_rec_opa { - __be64 service_id; __be32 dlid; __be32 slid; u8 raw_traffic; @@ -189,6 +187,7 @@ struct sa_path_rec_opa { struct sa_path_rec { union ib_gid dgid; union ib_gid sgid; + __be64 service_id; /* reserved */ __be32 flow_label; u8 hop_limit; @@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib, ib->ib.dlid = htons(ntohl(opa->opa.dlid)); ib->ib.slid = htons(ntohl(opa->opa.slid)); } - ib->ib.service_id = opa->opa.service_id; + ib->service_id = opa->service_id; ib->ib.raw_traffic = opa->opa.raw_traffic; } @@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa, } opa->opa.slid = slid; opa->opa.dlid = dlid; - opa->opa.service_id = ib->ib.service_id; + opa->service_id = ib->service_id; opa->opa.raw_traffic = ib->ib.raw_traffic; } @@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec) (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); } -static inline void sa_path_set_service_id(struct sa_path_rec *rec, - __be64 service_id) -{ - if (rec->rec_type == SA_PATH_REC_TYPE_IB) - rec->ib.service_id = service_id; - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) - rec->opa.service_id = service_id; -} - static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) { if (rec->rec_type == SA_PATH_REC_TYPE_IB) @@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec, rec->opa.raw_traffic = raw_traffic; } -static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec) -{ - if (rec->rec_type == SA_PATH_REC_TYPE_IB) - return rec->ib.service_id; - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) - return rec->opa.service_id; - return 0; -} - static inline __be32 sa_path_get_slid(struct sa_path_rec *rec) { if (rec->rec_type == 
SA_PATH_REC_TYPE_IB) -- cgit v1.2.3 From ff3f4cc267f6f39c2fc525c8918c929809defbfa Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 May 2017 11:23:16 -0700 Subject: virtchnl: finish conversion to virtchnl interface This patch implements the complete version of the virtchnl.h file with final renames, and fixes the related code in i40e and i40evf. It also expands comments, and adds details on the usage of certain fields. In addition, due to the changes a couple of casts are needed to prevent errors found by sparse after renaming some fields. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 18 +-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf.h | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 11 +- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 6 +- include/linux/avf/virtchnl.h | 128 +++++++++++++-------- 6 files changed, 102 insertions(+), 65 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 422cccf0de86..352d9d2ef3d2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -77,7 +77,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; - pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + pfe.severity = PF_EVENT_SEVERITY_INFO; if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = @@ -85,7 +85,8 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) } else { pfe.event_data.link_event.link_status = ls->link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = ls->link_speed; + pfe.event_data.link_event.link_speed = + (enum virtchnl_link_speed)ls->link_speed; } i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); @@ -116,7 +117,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) struct virtchnl_pf_event pfe; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } @@ -144,7 +145,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(struct virtchnl_pf_event), NULL); @@ -1586,7 +1587,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; - vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV; + vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; /* VFs only use TC 0 */ vfres->vsi_res[0].qset_handle @@ -1680,7 +1681,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, goto error_param; } /* Multicast promiscuous handling*/ - if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) + if (info->flags & FLAG_VF_MULTICAST_PROMISC) allmulti = true; if (vf->port_vlan_id) { @@ -1731,7 +1732,7 @@ static int 
i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); } - if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) + if (info->flags & FLAG_VF_UNICAST_PROMISC) alluni = true; if (vf->port_vlan_id) { aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, @@ -3241,7 +3242,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; - pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + pfe.severity = PF_EVENT_SEVERITY_INFO; switch (link) { case IFLA_VF_LINK_STATE_AUTO: @@ -3249,6 +3250,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) pfe.event_data.link_event.link_status = pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = + (enum virtchnl_link_speed) pf->hw.phy.link_info.link_speed; break; case IFLA_VF_LINK_STATE_ENABLE: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 9a7d995080b6..9dec7753911c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1107,7 +1107,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.fcoe = 0; for (i = 0; i < msg->num_vsis; i++) { - if (vsi_res->vsi_type == I40E_VSI_SRIOV) { + if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { ether_addr_copy(hw->mac.perm_addr, vsi_res->default_mac_addr); ether_addr_copy(hw->mac.addr, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 9d8c21b36332..6cc92089fecb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -263,7 +263,7 @@ struct i40evf_adapter { struct work_struct watchdog_task; bool netdev_registered; bool link_up; - enum i40e_aq_link_speed link_speed; + enum virtchnl_link_speed link_speed; enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? 
\ (_a)->vf_res->vf_offload_flags & \ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5d7b613e0d62..1b00274de530 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1707,13 +1707,13 @@ static void i40evf_watchdog_task(struct work_struct *work) } if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { - i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC | - I40E_FLAG_VF_MULTICAST_PROMISC); + i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | + FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; } if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { - i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC); + i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; } @@ -1969,7 +1969,8 @@ static void i40evf_adminq_task(struct work_struct *work) break; /* No event to process or error cleaning ARQ */ i40evf_virtchnl_completion(adapter, v_msg->v_opcode, - v_msg->v_retval, event.msg_buf, + (i40e_status)v_msg->v_retval, + event.msg_buf, event.msg_len); if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); @@ -2410,7 +2411,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) /* got VF config message back from PF, now we can parse it */ for (i = 0; i < vfres->num_vsis; i++) { - if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) adapter->vsi_res = &vfres->vsi_res[i]; } if (!adapter->vsi_res) { diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index d9f040900373..d2bb250a71af 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -662,15 +662,15 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) return; } - promisc_all = I40E_FLAG_VF_UNICAST_PROMISC | - I40E_FLAG_VF_MULTICAST_PROMISC; + promisc_all = FLAG_VF_UNICAST_PROMISC | + FLAG_VF_MULTICAST_PROMISC; if ((flags & promisc_all) == promisc_all) { adapter->flags |= I40EVF_FLAG_PROMISC_ON; adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } - if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) { + if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 73970bd439fe..6c6fbb492b5d 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -29,28 +29,29 @@ /* Description: * This header file describes the VF-PF communication protocol used - * by the various i40e drivers. + * by the drivers for all devices starting from our 40G product line * * Admin queue buffer usage: - * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * desc->opcode is always aqc_opc_send_msg_to_pf * flags, retval, datalen, and data addr are all used normally. - * Firmware copies the cookie fields when sending messages between the PF and - * VF, but uses all other fields internally. Due to this limitation, we - * must send all messages as "indirect", i.e. using an external buffer. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. 
Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. * - * All the vsi indexes are relative to the VF. Each VF can have maximum of + * All the VSI indexes are relative to the VF. Each VF can have maximum of * three VSIs. All the queue indexes are relative to the VSI. Each VF can * have a maximum of sixteen queues for all of its VSIs. * * The PF is required to return a status code in v_retval for all messages - * except RESET_VF, which does not require any response. The return value is of - * i40e_status_code type, defined in the i40e_type.h. + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. * - * In general, VF driver initialization should roughly follow the order of these - * opcodes. The VF driver must first validate the API version of the PF driver, - * then request a reset, then get resources, then configure queues and - * interrupts. After these operations are complete, the VF driver may start - * its queues, optionally add MAC and VLAN filters, and process traffic. + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. */ /* START GENERIC DEFINES @@ -68,6 +69,33 @@ enum virtchnl_status_code { VIRTCHNL_STATUS_NOT_SUPPORTED = -64, }; +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field @@ -77,6 +105,8 @@ enum virtchnl_ops { /* The PF sends status change events to VFs using * the VIRTCHNL_OP_EVENT opcode. * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
*/ VIRTCHNL_OP_UNKNOWN = 0, VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ @@ -96,14 +126,13 @@ enum virtchnl_ops { VIRTCHNL_OP_GET_STATS = 15, VIRTCHNL_OP_RSVD = 16, VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ - VIRTCHNL_OP_IWARP = 20, - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ VIRTCHNL_OP_CONFIG_RSS_KEY = 23, VIRTCHNL_OP_CONFIG_RSS_LUT = 24, VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, VIRTCHNL_OP_SET_RSS_HENA = 26, - }; /* Virtual channel message descriptor. This overlays the admin queue @@ -113,7 +142,7 @@ enum virtchnl_ops { struct virtchnl_msg { u8 pad[8]; /* AQ flags/opcode/len/retval fields */ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ - i40e_status v_retval; /* ditto for desc->retval */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ u32 vfid; /* used by PF when sending to VF */ }; @@ -155,6 +184,15 @@ struct virtchnl_version_info { * are cleared. */ +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + /* VIRTCHNL_OP_GET_VF_RESOURCES * Version 1.0 VF sends this request to PF with no parameters * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities @@ -166,14 +204,18 @@ struct virtchnl_version_info { struct virtchnl_vsi_resource { u16 vsi_id; u16 num_queue_pairs; - enum i40e_vsi_type vsi_type; + enum virtchnl_vsi_type vsi_type; u16 qset_handle; u8 default_mac_addr[ETH_ALEN]; }; -/* VF offload flags */ -#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 + +/* VF offload flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. 
+ */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 #define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 -#define VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 @@ -182,11 +224,12 @@ struct virtchnl_vsi_resource { #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 #define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 -#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 -#define I40E_VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ - VIRTCHNL_VF_OFFLOAD_VLAN | \ - VIRTCHNL_VF_OFFLOAD_RSS_PF) +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) struct virtchnl_vf_resource { u16 num_vsis; @@ -212,9 +255,9 @@ struct virtchnl_txq_info { u16 vsi_id; u16 queue_id; u16 ring_len; /* number of descriptors, multiple of 8 */ - u16 headwb_enabled; + u16 headwb_enabled; /* deprecated with AVF 1.0 */ u64 dma_ring_addr; - u64 dma_headwb_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ }; /* VIRTCHNL_OP_CONFIG_RX_QUEUE @@ -229,11 +272,11 @@ struct virtchnl_rxq_info { u16 queue_id; u32 ring_len; /* number of descriptors, multiple of 32 */ u16 hdr_size; - u16 splithdr_enabled; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ u32 databuffer_size; u32 max_pkt_size; u64 dma_ring_addr; - enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ }; /* VIRTCHNL_OP_CONFIG_VSI_QUEUES @@ -344,15 +387,15 @@ struct virtchnl_promisc_info { u16 flags; }; -#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 -#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 /* VIRTCHNL_OP_GET_STATS * VF sends this message to request stats for the selected VSI. VF uses * the virtchnl_queue_select struct to specify the VSI. The queue_id * field is ignored by the PF. * - * PF replies with struct i40e_eth_stats in an external buffer. + * PF replies with struct eth_stats in an external buffer. */ /* VIRTCHNL_OP_CONFIG_RSS_KEY @@ -382,7 +425,6 @@ struct virtchnl_rss_lut { * By default, the PF sets these to all possible traffic types that the * hardware supports. The VF can query this value if it wants to change the * traffic types that are hashed by the hardware. - * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h */ struct virtchnl_rss_hena { u64 hena; @@ -399,14 +441,15 @@ enum virtchnl_event_codes { VIRTCHNL_EVENT_RESET_IMPENDING, VIRTCHNL_EVENT_PF_DRIVER_CLOSE, }; -#define I40E_PF_EVENT_SEVERITY_INFO 0 -#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 struct virtchnl_pf_event { enum virtchnl_event_codes event; union { struct { - enum i40e_aq_link_speed link_speed; + enum virtchnl_link_speed link_speed; bool link_status; } link_event; } event_data; @@ -426,13 +469,6 @@ struct virtchnl_pf_event { * PF configures interrupt mapping and returns status. */ -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. 
- */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 -#define I40E_QUEUE_INVALID_IDX 0xFFFF - struct virtchnl_iwarp_qv_info { u32 v_idx; /* msix_vector */ u16 ceq_idx; @@ -446,8 +482,7 @@ struct virtchnl_iwarp_qvlist_info { }; /* VF reset states - these are written into the RSTAT register: - * I40E_VFGEN_RSTAT1 on the PF - * I40E_VFGEN_RSTAT on the VF + * VFGEN_RSTAT on the VF * When the PF initiates a reset, it writes 0 * When the reset is complete, it writes 1 * When the PF detects that the VF has recovered, it writes 2 @@ -461,7 +496,6 @@ enum virtchnl_vfr_states { VIRTCHNL_VFR_INPROGRESS = 0, VIRTCHNL_VFR_COMPLETED, VIRTCHNL_VFR_VFACTIVE, - VIRTCHNL_VFR_UNKNOWN, }; #endif /* _VIRTCHNL_H_ */ -- cgit v1.2.3 From 735e35c56bbc91621942dc5111b2970beb00e75a Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 May 2017 11:23:17 -0700 Subject: i40e/virtchnl: move function to virtchnl This moves a function that is needed for the virtchnl interface from the i40e PF driver over to the virtchnl.h file. It was manually verified that the function in question is unchanged except for the function name and function header, which explains the slight difference in the number of lines removed/added. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 150 +-------------------- include/linux/avf/virtchnl.h | 147 ++++++++++++++++++++ 2 files changed, 148 insertions(+), 149 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 352d9d2ef3d2..6bee254d34ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2531,154 +2531,6 @@ err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); } -/** - * i40e_vc_validate_vf_msg - * @ver: Virtchnl version info - * @v_opcode: Opcode for the message - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * validate msg format against struct for each opcode - **/ -static int -i40e_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, - u8 *msg, u16 msglen) -{ - bool err_msg_format = false; - int valid_len = 0; - - /* Validate message length. 
*/ - switch (v_opcode) { - case VIRTCHNL_OP_VERSION: - valid_len = sizeof(struct virtchnl_version_info); - break; - case VIRTCHNL_OP_RESET_VF: - break; - case VIRTCHNL_OP_GET_VF_RESOURCES: - if (VF_IS_V11(ver)) - valid_len = sizeof(u32); - break; - case VIRTCHNL_OP_CONFIG_TX_QUEUE: - valid_len = sizeof(struct virtchnl_txq_info); - break; - case VIRTCHNL_OP_CONFIG_RX_QUEUE: - valid_len = sizeof(struct virtchnl_rxq_info); - break; - case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - valid_len = sizeof(struct virtchnl_vsi_queue_config_info); - if (msglen >= valid_len) { - struct virtchnl_vsi_queue_config_info *vqc = - (struct virtchnl_vsi_queue_config_info *)msg; - valid_len += (vqc->num_queue_pairs * - sizeof(struct - virtchnl_queue_pair_info)); - if (vqc->num_queue_pairs == 0) - err_msg_format = true; - } - break; - case VIRTCHNL_OP_CONFIG_IRQ_MAP: - valid_len = sizeof(struct virtchnl_irq_map_info); - if (msglen >= valid_len) { - struct virtchnl_irq_map_info *vimi = - (struct virtchnl_irq_map_info *)msg; - valid_len += (vimi->num_vectors * - sizeof(struct virtchnl_vector_map)); - if (vimi->num_vectors == 0) - err_msg_format = true; - } - break; - case VIRTCHNL_OP_ENABLE_QUEUES: - case VIRTCHNL_OP_DISABLE_QUEUES: - valid_len = sizeof(struct virtchnl_queue_select); - break; - case VIRTCHNL_OP_ADD_ETH_ADDR: - case VIRTCHNL_OP_DEL_ETH_ADDR: - valid_len = sizeof(struct virtchnl_ether_addr_list); - if (msglen >= valid_len) { - struct virtchnl_ether_addr_list *veal = - (struct virtchnl_ether_addr_list *)msg; - valid_len += veal->num_elements * - sizeof(struct virtchnl_ether_addr); - if (veal->num_elements == 0) - err_msg_format = true; - } - break; - case VIRTCHNL_OP_ADD_VLAN: - case VIRTCHNL_OP_DEL_VLAN: - valid_len = sizeof(struct virtchnl_vlan_filter_list); - if (msglen >= valid_len) { - struct virtchnl_vlan_filter_list *vfl = - (struct virtchnl_vlan_filter_list *)msg; - valid_len += vfl->num_elements * sizeof(u16); - if (vfl->num_elements == 0) - err_msg_format = true; - } - break; - case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - valid_len = sizeof(struct virtchnl_promisc_info); - break; - case VIRTCHNL_OP_GET_STATS: - valid_len = sizeof(struct virtchnl_queue_select); - break; - case VIRTCHNL_OP_IWARP: - /* These messages are opaque to us and will be validated in - * the RDMA client code. We just need to check for nonzero - * length. The firmware will enforce max length restrictions. - */ - if (msglen) - valid_len = msglen; - else - err_msg_format = true; - break; - case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - break; - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); - if (msglen >= valid_len) { - struct virtchnl_iwarp_qvlist_info *qv = - (struct virtchnl_iwarp_qvlist_info *)msg; - if (qv->num_vectors == 0) { - err_msg_format = true; - break; - } - valid_len += ((qv->num_vectors - 1) * - sizeof(struct virtchnl_iwarp_qv_info)); - } - break; - case VIRTCHNL_OP_CONFIG_RSS_KEY: - valid_len = sizeof(struct virtchnl_rss_key); - if (msglen >= valid_len) { - struct virtchnl_rss_key *vrk = - (struct virtchnl_rss_key *)msg; - valid_len += vrk->key_len - 1; - } - break; - case VIRTCHNL_OP_CONFIG_RSS_LUT: - valid_len = sizeof(struct virtchnl_rss_lut); - if (msglen >= valid_len) { - struct virtchnl_rss_lut *vrl = - (struct virtchnl_rss_lut *)msg; - valid_len += vrl->lut_entries - 1; - } - break; - case VIRTCHNL_OP_GET_RSS_HENA_CAPS: - break; - case VIRTCHNL_OP_SET_RSS_HENA: - valid_len = sizeof(struct virtchnl_rss_hena); - break; - /* These are always errors coming from the VF. 
*/ - case VIRTCHNL_OP_EVENT: - case VIRTCHNL_OP_UNKNOWN: - default: - return VIRTCHNL_ERR_PARAM; - } - /* few more checks */ - if ((valid_len != msglen) || (err_msg_format)) - return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; - - return 0; -} - /** * i40e_vc_process_vf_msg * @pf: pointer to the PF structure @@ -2708,7 +2560,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, return I40E_ERR_PARAM; /* perform basic checks on the msg */ - ret = i40e_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); + ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); /* perform additional checks specific to this driver */ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 6c6fbb492b5d..dab76e947b9f 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -498,4 +498,151 @@ enum virtchnl_vfr_states { VIRTCHNL_VFR_VFACTIVE, }; +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. */ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = 
sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. + */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_ERR_PARAM; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} #endif /* _VIRTCHNL_H_ */ -- cgit v1.2.3 From a33c83c4353b2efc4d883bad06a86a9ba2dde4fc Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 11 May 2017 11:23:18 -0700 Subject: virtchnl: Add pad fields to a couple of structures This removes holes and makes structure sizes consistent across 32 and 64 bit builds. Signed-off-by: Sridhar Samudrala Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- include/linux/avf/virtchnl.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index dab76e947b9f..72466c69f749 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -275,8 +275,10 @@ struct virtchnl_rxq_info { u16 splithdr_enabled; /* deprecated with AVF 1.0 */ u32 databuffer_size; u32 max_pkt_size; + u32 pad1; u64 dma_ring_addr; enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; }; /* VIRTCHNL_OP_CONFIG_VSI_QUEUES @@ -295,6 +297,7 @@ struct virtchnl_queue_pair_info { struct virtchnl_vsi_queue_config_info { u16 vsi_id; u16 num_queue_pairs; + u32 pad; struct virtchnl_queue_pair_info qpair[1]; }; -- cgit v1.2.3 From 73556269aab30c39cba9cf8efafc402d0deb87b2 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 11 May 2017 11:23:19 -0700 Subject: virtchnl: Add compile time static asserts to validate structure sizes This uses preprocessor tricks to make sure that a divide by zero occurs if a struct changes size outside the expected number of bytes. 
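As a hedged illustration of the preprocessor trick (a standalone sketch with invented demo_ names, not the kernel macro itself, which appears in the diff below):

/* An enum initializer must be an integer constant expression, so a
 * constant division by zero is rejected at compile time. The check
 * therefore costs nothing at run time and works at file scope in a
 * plain header.
 */
#define DEMO_CHECK_STRUCT_LEN(n, X) enum demo_assert_enum_##X \
	{ demo_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

struct demo {
	unsigned short vsi_id;		/* 2 bytes */
	unsigned short num_elements;	/* 2 bytes */
	unsigned int flags;		/* 4 bytes on common ABIs */
};

/* Builds cleanly: sizeof(struct demo) == 8, so the divisor is 1. */
DEMO_CHECK_STRUCT_LEN(8, demo);

/* DEMO_CHECK_STRUCT_LEN(12, demo); would make the divisor 0 and fail
 * the build with a division-by-zero error in a constant expression.
 */

Modern code could use C11 static_assert() or the kernel's BUILD_BUG_ON() to the same effect; the enum form has the advantage of needing no headers and producing no object code, only a never-used enumerator.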
Signed-off-by: Sridhar Samudrala Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- include/linux/avf/virtchnl.h | 50 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'include') diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 72466c69f749..c893b9520a67 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -135,6 +135,14 @@ enum virtchnl_ops { VIRTCHNL_OP_SET_RSS_HENA = 26, }; +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + /* Virtual channel message descriptor. This overlays the admin queue * descriptor. All other data is passed in external buffers. */ @@ -146,6 +154,8 @@ struct virtchnl_msg { u32 vfid; /* used by PF when sending to VF */ }; +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + /* Message descriptions and data structures.*/ /* VIRTCHNL_OP_VERSION @@ -169,6 +179,8 @@ struct virtchnl_version_info { u32 minor; }; +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) @@ -209,6 +221,8 @@ struct virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + /* VF offload flags * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including * TX/RX Checksum offloading and TSO for non-tunnelled packets. @@ -244,6 +258,8 @@ struct virtchnl_vf_resource { struct virtchnl_vsi_resource vsi_res[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + /* VIRTCHNL_OP_CONFIG_TX_QUEUE * VF sends this message to set up parameters for one TX queue. * External data buffer contains one instance of virtchnl_txq_info. @@ -260,6 +276,8 @@ struct virtchnl_txq_info { u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ }; +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + /* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. * External data buffer contains one instance of virtchnl_rxq_info. @@ -281,6 +299,8 @@ struct virtchnl_rxq_info { u32 pad2; }; +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + /* VIRTCHNL_OP_CONFIG_VSI_QUEUES * VF sends this message to set parameters for all active TX and RX queues * associated with the specified VSI. @@ -294,6 +314,8 @@ struct virtchnl_queue_pair_info { struct virtchnl_rxq_info rxq; }; +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + struct virtchnl_vsi_queue_config_info { u16 vsi_id; u16 num_queue_pairs; @@ -301,6 +323,8 @@ struct virtchnl_vsi_queue_config_info { struct virtchnl_queue_pair_info qpair[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + /* VIRTCHNL_OP_CONFIG_IRQ_MAP * VF uses this message to map vectors to queues. 
* The rxq_map and txq_map fields are bitmaps used to indicate which queues @@ -317,11 +341,15 @@ struct virtchnl_vector_map { u16 txitr_idx; }; +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + struct virtchnl_irq_map_info { u16 num_vectors; struct virtchnl_vector_map vecmap[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + /* VIRTCHNL_OP_ENABLE_QUEUES * VIRTCHNL_OP_DISABLE_QUEUES * VF sends these message to enable or disable TX/RX queue pairs. @@ -337,6 +365,8 @@ struct virtchnl_queue_select { u32 tx_queues; }; +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + /* VIRTCHNL_OP_ADD_ETH_ADDR * VF sends this message in order to add one or more unicast or multicast * address filters for the specified VSI. @@ -354,12 +384,16 @@ struct virtchnl_ether_addr { u8 pad[2]; }; +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + struct virtchnl_ether_addr_list { u16 vsi_id; u16 num_elements; struct virtchnl_ether_addr list[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + /* VIRTCHNL_OP_ADD_VLAN * VF sends this message to add one or more VLAN tag filters for receives. * PF adds the filters and returns status. @@ -380,6 +414,8 @@ struct virtchnl_vlan_filter_list { u16 vlan_id[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE * VF sends VSI id and flags. * PF returns status code in retval. @@ -390,6 +426,8 @@ struct virtchnl_promisc_info { u16 flags; }; +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + #define FLAG_VF_UNICAST_PROMISC 0x00000001 #define FLAG_VF_MULTICAST_PROMISC 0x00000002 @@ -416,12 +454,16 @@ struct virtchnl_rss_key { u8 key[1]; /* RSS hash key, packed bytes */ }; +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table*/ }; +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + /* VIRTCHNL_OP_GET_RSS_HENA_CAPS * VIRTCHNL_OP_SET_RSS_HENA * VF sends these messages to get and set the hash filter enable bits for RSS. @@ -433,6 +475,8 @@ struct virtchnl_rss_hena { u64 hena; }; +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + /* VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other @@ -460,6 +504,8 @@ struct virtchnl_pf_event { int severity; }; +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + /* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP * VF uses this message to request PF to map IWARP vectors to IWARP queues. * The request for this originates from the VF IWARP driver through @@ -479,11 +525,15 @@ struct virtchnl_iwarp_qv_info { u8 itr_idx; }; +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + struct virtchnl_iwarp_qvlist_info { u32 num_vectors; struct virtchnl_iwarp_qv_info qv_info[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + /* VF reset states - these are written into the RSTAT register: * VFGEN_RSTAT on the VF * When the PF initiates a reset, it writes 0 -- cgit v1.2.3 From 73a7ece8f70c955464080e434b5324bcdfdcb1b1 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 1 Jun 2017 16:07:11 -0400 Subject: net: dsa: comment hot path requirements The DSA layer uses inline helpers and copy of the tagging functions for faster access in hot path. Add comments to detail that. Reviewed-by: Florian Fainelli Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- include/net/dsa.h | 3 +++ net/dsa/dsa_priv.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 7de1234ba136..18ca0a935c96 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -127,6 +127,8 @@ struct dsa_switch_tree { * protocol to use. */ struct net_device *master_netdev; + + /* Copy of tag_ops->rcv for faster access in hot path */ struct sk_buff * (*rcv)(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, @@ -465,6 +467,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); struct net_device *dsa_dev_to_net_device(struct device *dev); +/* Keep inline for faster access in hot path */ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) { return dst->rcv != NULL; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 7459d5735d8b..db2a7b9edfb8 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -73,6 +73,7 @@ struct dsa_device_ops { }; struct dsa_slave_priv { + /* Copy of dp->ds->dst->tag_ops->xmit for faster access in hot path */ struct sk_buff * (*xmit)(struct sk_buff *skb, struct net_device *dev); -- cgit v1.2.3 From 717ffbfb28ac2b53d3140b2a7dfd345e1569b3a5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Thu, 1 Jun 2017 16:07:13 -0400 Subject: net: dsa: remove dsa_uses_tagged_protocol Since dev->dsa_ptr is a pointer to a dsa_switch_tree, there is no need to have another inline helper just to check rcv. Remove dsa_uses_tagged_protocol and check dsa_ptr && dsa_ptr->rcv together at the same time. Reviewed-by: Florian Fainelli Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 18ca0a935c96..448d8bc77707 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -468,16 +468,10 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); struct net_device *dsa_dev_to_net_device(struct device *dev); /* Keep inline for faster access in hot path */ -static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) -{ - return dst->rcv != NULL; -} - static inline bool netdev_uses_dsa(struct net_device *dev) { #if IS_ENABLED(CONFIG_NET_DSA) - if (dev->dsa_ptr != NULL) - return dsa_uses_tagged_protocol(dev->dsa_ptr); + return dev->dsa_ptr && dev->dsa_ptr->rcv; #endif return false; } -- cgit v1.2.3 From 3a5f8997dc643a0e0e9a0895c2214b21e5e774a2 Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Thu, 1 Jun 2017 15:37:02 +0800 Subject: team: add macro MODULE_ALIAS_TEAM_MODE for team mode alias Add a new macro MODULE_ALIAS_TEAM_MODE to unify and simplify the declaration of team mode alias. Signed-off-by: Zhang Shengju Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/team/team_mode_activebackup.c | 2 +- drivers/net/team/team_mode_broadcast.c | 2 +- drivers/net/team/team_mode_loadbalance.c | 2 +- drivers/net/team/team_mode_random.c | 2 +- drivers/net/team/team_mode_roundrobin.c | 2 +- include/linux/if_team.h | 2 ++ 6 files changed, 7 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c index 3f189823ba3b..ddd16a0c1fc1 100644 --- a/drivers/net/team/team_mode_activebackup.c +++ b/drivers/net/team/team_mode_activebackup.c @@ -146,4 +146,4 @@ module_exit(ab_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Active-backup mode for team"); -MODULE_ALIAS("team-mode-activebackup"); +MODULE_ALIAS_TEAM_MODE("activebackup"); diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index 302ff35b0cbc..e4eac3de1862 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -75,4 +75,4 @@ module_exit(bc_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Broadcast mode for team"); -MODULE_ALIAS("team-mode-broadcast"); +MODULE_ALIAS_TEAM_MODE("broadcast"); diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index b228bea7931f..1468ddf424cc 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -695,4 +695,4 @@ module_exit(lb_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Load-balancing mode for team"); -MODULE_ALIAS("team-mode-loadbalance"); +MODULE_ALIAS_TEAM_MODE("loadbalance"); diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c index 215f845782db..c20b9446e2e4 100644 --- a/drivers/net/team/team_mode_random.c +++ b/drivers/net/team/team_mode_random.c @@ -65,4 +65,4 @@ module_exit(rnd_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Random mode for team"); -MODULE_ALIAS("team-mode-random"); +MODULE_ALIAS_TEAM_MODE("random"); diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index 0aa234118c03..66c3209dc4a6 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -77,4 +77,4 @@ module_exit(rr_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Round-robin mode for team"); -MODULE_ALIAS("team-mode-roundrobin"); +MODULE_ALIAS_TEAM_MODE("roundrobin"); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index c05216a8fbac..30294603526f 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -298,4 +298,6 @@ extern void team_mode_unregister(const struct team_mode *mode); #define TEAM_DEFAULT_NUM_TX_QUEUES 16 #define TEAM_DEFAULT_NUM_RX_QUEUES 16 +#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind) + #endif /* _LINUX_IF_TEAM_H_ */ -- cgit v1.2.3 From 3c5da94278026a4583320f97f6547573fb3a93aa Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 2 Jun 2017 08:58:31 +0300 Subject: qed: Share additional information with qedf Share several new tidbits with qedf: - wwpn & wwnn - Absolute pf-id [this one is actually meant for qedi as well] - Number of available CQs While we're at it, now that qedf will be aware of the available CQs we can add some validation on the inputs it provides. 
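A minimal sketch of consumer-side use of the newly shared fields (the function below is illustrative only, not actual qedf code; struct qed_dev_fcoe_info and its new members are taken from the diff that follows):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/qed/qed_fcoe_if.h>

/* Illustrative consumer: validate a CQ request against what the
 * device exposed, mirroring the PF-side sanity check added in
 * qed_sp_fcoe_func_start(), and pick up the reported WWNs.
 */
static int demo_fcoe_probe(struct qed_dev_fcoe_info *info, u8 wanted_cqs)
{
	if (!info->num_cqs || wanted_cqs > info->num_cqs)
		return -EINVAL;	/* the PF would refuse this at function start */

	pr_info("wwpn %llx wwnn %llx, using %u of %u CQs\n",
		info->wwpn, info->wwnn, wanted_cqs, info->num_cqs);
	return 0;
}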
Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 8 +++++++- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 14 ++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 2 ++ include/linux/qed/qed_fcoe_if.h | 5 +++++ include/linux/qed/qed_if.h | 2 ++ 5 files changed, 30 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 7649f35000db..2d88d4883483 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2071,16 +2071,22 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) QED_VF_L2_QUE)); } + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n", + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n", (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), + (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), (int)sb_cnt.cnt); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 3fc4ff22960e..df195c02b711 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -141,6 +141,15 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, p_data = &p_ramrod->init_ramrod_data; fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + /* Sanity */ + if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) { + DP_ERR(p_hwfn, + "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", + fcoe_pf_params->num_cqs, + p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); + return -EINVAL; + } + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); p_data->sq_num_pages_in_pbl = tmp; @@ -739,6 +748,11 @@ static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, info->secondary_bdq_rq_addr = qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + info->wwpn = hwfn->mcp_info->func_info.wwn_port; + info->wwnn = hwfn->mcp_info->func_info.wwn_node; + + info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ); + return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index ac3bdcd9f0b6..baebd5926895 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -269,6 +269,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; + + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index bd6bcb809415..1e015c50e6b8 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -24,6 +24,11 @@ struct qed_dev_fcoe_info { void __iomem *primary_dbq_rq_addr; void __iomem *secondary_bdq_rq_addr; + + u64 wwpn; + u64 wwnn; + + u8 num_cqs; }; struct qed_fcoe_params_offload { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 607e1c5e185a..e29c6f74a4d4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -360,6 +360,8 @@ struct qed_dev_info { bool vxlan_enable; bool gre_enable; bool geneve_enable; + + u8 abs_pf_id; }; enum qed_sb_type { -- cgit v1.2.3 From 20675b37ee76d11430fd3d4da0851fc6a4e36abc Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 2 Jun 2017 08:58:32 +0300 Subject: qed: Support NVM-image reading API Storage drivers require images from the nvram in boot-from-SAN scenarios. This provides the necessary API between qed and the protocol drivers to perform such reads. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 16 ++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 89 ++++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 21 +++++++ include/linux/qed/qed_if.h | 18 ++++++ 4 files changed, 144 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index baebd5926895..6ac10ce14e5b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1535,6 +1535,21 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, + u8 *buf, u16 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + int rc; + + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len); + qed_ptt_release(hwfn, ptt); + return rc; +} + static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) { *rx_coal = cdev->rx_coalesce_usecs; @@ -1712,6 +1727,7 @@ const struct qed_common_ops qed_common_ops_pass = { .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .nvm_get_image = &qed_nvm_get_image, .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e82f32950361..9da91045d167 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2310,6 +2310,95 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, return rc; } +static int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att) +{ + struct bist_nvm_image_att mfw_image_att; + enum nvm_image_type type; + u32 num_images, i; + int rc; + + /* Translate image_id into MFW definitions */ + switch (image_id) { + case QED_NVM_IMAGE_ISCSI_CFG: + type = NVM_TYPE_ISCSI_CFG; + break; + case QED_NVM_IMAGE_FCOE_CFG: + type = NVM_TYPE_FCOE_CFG; + break; + default: + DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", + image_id); + return -EINVAL; + } + + /* Learn number of images, then traverse and see if one fits */ + rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); + if (rc || !num_images) + return -EINVAL; + + for (i = 0; i < num_images; i++) { + rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, + &mfw_image_att, i); + if (rc) + return rc; + + if (type == mfw_image_att.image_type) + break; + } + if (i == num_images) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Failed to find nvram image of type %08x\n", + image_id); + return -EINVAL; + } + + p_image_att->start_addr = mfw_image_att.nvm_start_addr; + p_image_att->length = mfw_image_att.len; + + return 0; +} + +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len) +{ + struct qed_nvm_image_att image_att; + int rc; + + memset(p_buffer, 0, buffer_len); + + rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); + if (rc) + return rc; + + /* Validate sizes - both the image's and the supplied buffer's */ + if (image_att.length <= 4) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Image [%d] is too small - only %d bytes\n", + image_id, image_att.length); + return -EINVAL; + } + + /* Each NVM image is 
suffixed by CRC; Upper-layer has no need for it */ + image_att.length -= 4; + + if (image_att.length > buffer_len) { + DP_VERBOSE(p_hwfn, + QED_MSG_STORAGE, + "Image [%d] is too big - %08x bytes where only %08x are available\n", + image_id, image_att.length, buffer_len); + return -ENOMEM; + } + + return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr, + p_buffer, image_att.length); +} + static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) { enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 40247593e772..af03b3651411 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -430,6 +430,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); +struct qed_nvm_image_att { + u32 start_addr; + u32 length; +}; + +/** + * @brief Allows reading a whole nvram image + * + * @param p_hwfn + * @param p_ptt + * @param image_id - image requested for reading + * @param p_buffer - allocated buffer into which to fill data + * @param buffer_len - length of the allocated buffer. + * + * @return 0 iff p_buffer now contains the nvram image. + */ +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len); + /** * @brief Bist register test * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e29c6f74a4d4..567ea3ea6c0e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -156,6 +156,11 @@ struct qed_dcbx_get { struct qed_dcbx_admin_params local; }; +enum qed_nvm_images { + QED_NVM_IMAGE_ISCSI_CFG, + QED_NVM_IMAGE_FCOE_CFG, +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -630,6 +635,19 @@ struct qed_common_ops { void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); +/** + * @brief nvm_get_image - reads an entire image from nvram + * + * @param cdev + * @param type - type of the request nvram image + * @param buf - preallocated buffer to fill with the image + * @param len - length of the allocated buffer + * + * @return 0 on success, error otherwise + */ + int (*nvm_get_image)(struct qed_dev *cdev, + enum qed_nvm_images type, u8 *buf, u16 len); + /** * @brief get_coalesce - Get coalesce parameters in usec * -- cgit v1.2.3 From dc4528e9e890f82900d75ac6276aba8ce89a80b6 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 2 Jun 2017 08:58:33 +0300 Subject: qed: Add support for changing iSCSI mac Enhance API between qedi and qed, allowing qedi to inform device's firmware when the iSCSI mac is to be changed. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 66 +++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 + include/linux/qed/qed_iscsi_if.h | 7 +++ 3 files changed, 74 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index bc8ce09d390f..6103723d7118 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -488,6 +488,54 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +static int +qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_mac_update *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u8 ucval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_MAC_UPDATE, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + ucval = p_conn->remote_mac[1]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval; + ucval = p_conn->remote_mac[0]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval; + ucval = p_conn->remote_mac[3]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval; + ucval = p_conn->remote_mac[2]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval; + ucval = p_conn->remote_mac[5]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval; + ucval = p_conn->remote_mac[4]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, @@ -1324,6 +1372,23 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats); } +static int qed_iscsi_change_mac(struct qed_dev *cdev, + u32 handle, const u8 *mac) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats) { @@ -1358,6 +1423,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = { .destroy_conn = &qed_iscsi_destroy_conn, .clear_sq = &qed_iscsi_clear_conn_sq, .get_stats = &qed_iscsi_stats, + .change_mac = &qed_iscsi_change_mac, }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index b9464f3ab0e2..00dd50f8c42f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -120,6 +120,7 @@ union ramrod_data { struct iscsi_spe_func_dstry iscsi_destroy; struct 
iscsi_spe_conn_offload iscsi_conn_offload; struct iscsi_conn_update_ramrod_params iscsi_conn_update; + struct iscsi_spe_conn_mac_update iscsi_conn_mac_update; struct iscsi_spe_conn_termination iscsi_conn_terminate; struct vf_start_ramrod_data vf_start; diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 3414649133d2..111e606a74c8 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -210,6 +210,11 @@ struct qed_iscsi_cb_ops { * @param stats - pointer to struck that would be filled we stats * @return 0 on success, error otherwise. + * @change_mac Change MAC of interface + * @param cdev + * @param handle - the connection handle. + * @param mac - new MAC to configure. + * @return 0 on success, otherwise error value. */ struct qed_iscsi_ops { const struct qed_common_ops *common; @@ -248,6 +253,8 @@ struct qed_iscsi_ops { int (*get_stats)(struct qed_dev *cdev, struct qed_iscsi_stats *stats); + + int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac); }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void); -- cgit v1.2.3 From cee360ab4dd66fc1de33a5fa1cb418fa21c27ce3 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 31 May 2017 16:36:31 +0800 Subject: sctp: define the member stream as an object instead of pointer in asoc As per Marcelo's suggestion, stream is a fixed-size member of asoc and would not grow with more streams. To avoid an allocation for it, this patch defines it as an object instead of a pointer, updates the places using it, and also creates sctp_stream_update(), called in sctp_assoc_update(), to migrate the stream info from one stream to another. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 3 +- net/sctp/associola.c | 13 ++++----- net/sctp/chunk.c | 4 +-- net/sctp/outqueue.c | 10 +++---- net/sctp/proc.c | 4 +-- net/sctp/sm_make_chunk.c | 2 +- net/sctp/sm_statefuns.c | 8 +++--- net/sctp/socket.c | 14 +++++----- net/sctp/stream.c | 68 ++++++++++++++++++++++------------------------ net/sctp/ulpqueue.c | 8 +++--- 10 files changed, 65 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a8b38e123f97..c8dbf410c4f5 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -381,6 +381,7 @@ int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp); int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); void sctp_stream_clear(struct sctp_stream *stream); +void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new); /* What is the current SSN number for this stream? */ #define sctp_ssn_peek(stream, type, sid) \ @@ -1750,7 +1751,7 @@ struct sctp_association { __u32 default_rcv_context; /* Stream arrays */ - struct sctp_stream *stream; + struct sctp_stream stream; /* All outbound chunks go through this structure. */ struct sctp_outq outqueue; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 95238284c422..6625b15ab81a 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -291,7 +291,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a return asoc; stream_free: - sctp_stream_free(asoc->stream); + sctp_stream_free(&asoc->stream); fail_init: sock_put(asoc->base.sk); sctp_endpoint_put(asoc->ep); @@ -365,7 +365,7 @@ void sctp_association_free(struct sctp_association *asoc) sctp_tsnmap_free(&asoc->peer.tsn_map); /* Free stream information.
*/ - sctp_stream_free(asoc->stream); + sctp_stream_free(&asoc->stream); if (asoc->strreset_chunk) sctp_chunk_free(asoc->strreset_chunk); @@ -1151,7 +1151,7 @@ void sctp_assoc_update(struct sctp_association *asoc, /* Reinitialize SSN for both local streams * and peer's streams. */ - sctp_stream_clear(asoc->stream); + sctp_stream_clear(&asoc->stream); /* Flush the ULP reassembly and ordered queue. * Any data there will now be stale and will @@ -1177,11 +1177,8 @@ void sctp_assoc_update(struct sctp_association *asoc, asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; - if (sctp_state(asoc, COOKIE_WAIT)) { - sctp_stream_free(asoc->stream); - asoc->stream = new->stream; - new->stream = NULL; - } + if (sctp_state(asoc, COOKIE_WAIT)) + sctp_stream_update(&asoc->stream, &new->stream); if (!asoc->assoc_id) { /* get a new association id since we don't have one diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 697721a7a3f1..81466f6442e8 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -307,7 +307,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk) if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) && time_after(jiffies, chunk->msg->expires_at)) { struct sctp_stream_out *streamout = - &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream]; + &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream]; if (chunk->sent_count) { chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++; @@ -320,7 +320,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk) } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) && chunk->sent_count > chunk->sinfo.sinfo_timetolive) { struct sctp_stream_out *streamout = - &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream]; + &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream]; chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++; streamout->abandoned_sent[SCTP_PR_INDEX(RTX)]++; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index fe4c3d462f6e..20299df163b9 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -363,7 +363,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc, sctp_insert_list(&asoc->outqueue.abandoned, &chk->transmitted_list); - streamout = &asoc->stream->out[chk->sinfo.sinfo_stream]; + streamout = &asoc->stream.out[chk->sinfo.sinfo_stream]; asoc->sent_cnt_removable--; asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; @@ -400,9 +400,9 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, q->out_qlen -= chk->skb->len; asoc->sent_cnt_removable--; asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; - if (chk->sinfo.sinfo_stream < asoc->stream->outcnt) { + if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) { struct sctp_stream_out *streamout = - &asoc->stream->out[chk->sinfo.sinfo_stream]; + &asoc->stream.out[chk->sinfo.sinfo_stream]; streamout->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; } @@ -1036,7 +1036,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ - if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) { + if (chunk->sinfo.sinfo_stream >= asoc->stream.outcnt) { /* Mark as failed send. 
*/ sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); @@ -1054,7 +1054,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) continue; } - if (asoc->stream->out[sid].state == SCTP_STREAM_CLOSED) { + if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { sctp_outq_head_data(q, chunk); goto sctp_flush_out; } diff --git a/net/sctp/proc.c b/net/sctp/proc.c index a0b29d43627f..5a27d0f03df5 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) sctp_seq_dump_remote_addrs(seq, assoc); seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " "%8d %8d %8d %8d", - assoc->hbinterval, assoc->stream->incnt, - assoc->stream->outcnt, assoc->max_retrans, + assoc->hbinterval, assoc->stream.incnt, + assoc->stream.outcnt, assoc->max_retrans, assoc->init_retries, assoc->shutdown_retries, assoc->rtx_data_chunks, atomic_read(&sk->sk_wmem_alloc), diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 92e332e17391..244181413bca 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1544,7 +1544,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) /* All fragments will be on the same stream */ sid = ntohs(chunk->subh.data_hdr->stream); - stream = chunk->asoc->stream; + stream = &chunk->asoc->stream; /* Now assign the sequence number to the entire message. * All fragments must have the same stream sequence number. diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index f863b5573e42..df73190da761 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3958,7 +3958,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { - if (ntohs(skip->stream) >= asoc->stream->incnt) + if (ntohs(skip->stream) >= asoc->stream.incnt) goto discard_noforce; } @@ -4029,7 +4029,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { - if (ntohs(skip->stream) >= asoc->stream->incnt) + if (ntohs(skip->stream) >= asoc->stream.incnt) goto gen_shutdown; } @@ -6365,7 +6365,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, * and discard the DATA chunk. */ sid = ntohs(data_hdr->stream); - if (sid >= asoc->stream->incnt) { + if (sid >= asoc->stream.incnt) { /* Mark tsn as received even though we drop it */ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); @@ -6387,7 +6387,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, * and is invalid. */ ssn = ntohs(data_hdr->ssn); - if (ordered && SSN_lt(ssn, sctp_ssn_peek(asoc->stream, in, sid))) + if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid))) return SCTP_IERROR_PROTO_VIOLATION; /* Send the data up to the user. Note: Schedule the diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f16c8d97b7f3..0822046e4f3f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) } /* Check for invalid stream. 
*/ - if (sinfo->sinfo_stream >= asoc->stream->outcnt) { + if (sinfo->sinfo_stream >= asoc->stream.outcnt) { err = -EINVAL; goto out_free; } @@ -4497,8 +4497,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - info->sctpi_instrms = asoc->stream->incnt; - info->sctpi_outstrms = asoc->stream->outcnt; + info->sctpi_instrms = asoc->stream.incnt; + info->sctpi_outstrms = asoc->stream.outcnt; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) @@ -4727,8 +4727,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - status.sstat_instrms = asoc->stream->incnt; - status.sstat_outstrms = asoc->stream->outcnt; + status.sstat_instrms = asoc->stream.incnt; + status.sstat_outstrms = asoc->stream.outcnt; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, @@ -6600,10 +6600,10 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); - if (!asoc || params.sprstat_sid >= asoc->stream->outcnt) + if (!asoc || params.sprstat_sid >= asoc->stream.outcnt) goto out; - streamout = &asoc->stream->out[params.sprstat_sid]; + streamout = &asoc->stream.out[params.sprstat_sid]; if (policy == SCTP_PR_SCTP_NONE) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index dda53a293986..af6b49850344 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -37,30 +37,23 @@ int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp) { - struct sctp_stream *stream; + struct sctp_stream *stream = &asoc->stream; int i; - stream = kzalloc(sizeof(*stream), gfp); - if (!stream) - return -ENOMEM; - stream->outcnt = asoc->c.sinit_num_ostreams; stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); - if (!stream->out) { - kfree(stream); + if (!stream->out) return -ENOMEM; - } + for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; - asoc->stream = stream; - return 0; } int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) { - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; int i; /* Initial stream->out size may be very big, so free it and alloc @@ -70,7 +63,7 @@ int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) stream->outcnt = asoc->c.sinit_num_ostreams; stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); if (!stream->out) - goto nomem; + return -ENOMEM; for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; @@ -79,26 +72,17 @@ int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); if (!stream->in) { kfree(stream->out); - goto nomem; + stream->out = NULL; + return -ENOMEM; } return 0; - -nomem: - asoc->stream = NULL; - kfree(stream); - - return -ENOMEM; } void sctp_stream_free(struct sctp_stream *stream) { - if (unlikely(!stream)) - return; - kfree(stream->out); kfree(stream->in); - kfree(stream); } void sctp_stream_clear(struct sctp_stream *stream) @@ -112,6 +96,19 @@ void 
sctp_stream_clear(struct sctp_stream *stream) stream->in[i].ssn = 0; } +void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new) +{ + sctp_stream_free(stream); + + stream->out = new->out; + stream->in = new->in; + stream->outcnt = new->outcnt; + stream->incnt = new->incnt; + + new->out = NULL; + new->in = NULL; +} + static int sctp_send_reconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { @@ -128,7 +125,7 @@ static int sctp_send_reconf(struct sctp_association *asoc, int sctp_send_reset_streams(struct sctp_association *asoc, struct sctp_reset_streams *params) { - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; __u16 i, str_nums, *str_list; struct sctp_chunk *chunk; int retval = -EINVAL; @@ -214,6 +211,7 @@ out: int sctp_send_reset_assoc(struct sctp_association *asoc) { + struct sctp_stream *stream = &asoc->stream; struct sctp_chunk *chunk = NULL; int retval; __u16 i; @@ -230,8 +228,8 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) return -ENOMEM; /* Block further xmit of data until this request is completed */ - for (i = 0; i < asoc->stream->outcnt; i++) - asoc->stream->out[i].state = SCTP_STREAM_CLOSED; + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_CLOSED; asoc->strreset_chunk = chunk; sctp_chunk_hold(asoc->strreset_chunk); @@ -241,8 +239,8 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) sctp_chunk_put(asoc->strreset_chunk); asoc->strreset_chunk = NULL; - for (i = 0; i < asoc->stream->outcnt; i++) - asoc->stream->out[i].state = SCTP_STREAM_OPEN; + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; return retval; } @@ -255,7 +253,7 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) int sctp_send_add_streams(struct sctp_association *asoc, struct sctp_add_streams *params) { - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; struct sctp_chunk *chunk = NULL; int retval = -ENOMEM; __u32 outcnt, incnt; @@ -357,7 +355,7 @@ struct sctp_chunk *sctp_process_strreset_outreq( struct sctp_ulpevent **evp) { struct sctp_strreset_outreq *outreq = param.v; - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; __u16 i, nums, flags = 0, *str_p = NULL; __u32 result = SCTP_STRRESET_DENIED; __u32 request_seq; @@ -449,7 +447,7 @@ struct sctp_chunk *sctp_process_strreset_inreq( struct sctp_ulpevent **evp) { struct sctp_strreset_inreq *inreq = param.v; - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; __u32 result = SCTP_STRRESET_DENIED; struct sctp_chunk *chunk = NULL; __u16 i, nums, *str_p; @@ -523,7 +521,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( { __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen; struct sctp_strreset_tsnreq *tsnreq = param.v; - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; __u32 result = SCTP_STRRESET_DENIED; __u32 request_seq; __u16 i; @@ -612,7 +610,7 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( struct sctp_ulpevent **evp) { struct sctp_strreset_addstrm *addstrm = param.v; - struct sctp_stream *stream = asoc->stream; + struct sctp_stream *stream = &asoc->stream; __u32 result = SCTP_STRRESET_DENIED; struct sctp_stream_in *streamin; __u32 request_seq, incnt; @@ -687,7 +685,7 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in( struct sctp_ulpevent **evp) { struct sctp_strreset_addstrm *addstrm = param.v; - struct sctp_stream *stream = asoc->stream; + 
struct sctp_stream *stream = &asoc->stream; __u32 result = SCTP_STRRESET_DENIED; struct sctp_stream_out *streamout; struct sctp_chunk *chunk = NULL; @@ -758,8 +756,8 @@ struct sctp_chunk *sctp_process_strreset_resp( union sctp_params param, struct sctp_ulpevent **evp) { + struct sctp_stream *stream = &asoc->stream; struct sctp_strreset_resp *resp = param.v; - struct sctp_stream *stream = asoc->stream; struct sctp_transport *t; __u16 i, nums, flags = 0; sctp_paramhdr_t *req; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index aa3624d50278..25f7e4140566 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -764,7 +764,7 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, __u16 sid, csid, cssn; sid = event->stream; - stream = ulpq->asoc->stream; + stream = &ulpq->asoc->stream; event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; @@ -858,7 +858,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, /* Note: The stream ID must be verified before this routine. */ sid = event->stream; ssn = event->ssn; - stream = ulpq->asoc->stream; + stream = &ulpq->asoc->stream; /* Is this the expected SSN for this stream ID? */ if (ssn != sctp_ssn_peek(stream, in, sid)) { @@ -893,7 +893,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) struct sk_buff_head *lobby = &ulpq->lobby; __u16 csid, cssn; - stream = ulpq->asoc->stream; + stream = &ulpq->asoc->stream; /* We are holding the chunks by stream, by SSN. */ skb_queue_head_init(&temp); @@ -958,7 +958,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) struct sctp_stream *stream; /* Note: The stream ID must be verified before this routine. */ - stream = ulpq->asoc->stream; + stream = &ulpq->asoc->stream; /* Is this an old SSN? If so ignore. */ if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid))) -- cgit v1.2.3 From ff356414dc006170153c79434eb81d130c03beec Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 31 May 2017 16:36:32 +0800 Subject: sctp: merge sctp_stream_new and sctp_stream_init Since the last patch, sctp no longer needs to alloc memory for asoc->stream. sctp_stream_new and sctp_stream_init are both used to alloc memory for stream.in or stream.out, and their names are also confusing. This patch merges them into sctp_stream_init, passing only the stream and stream-count parameters into it instead of the whole asoc. Signed-off-by: Xin Long Signed-off-by: David S.
Miller --- include/net/sctp/structs.h | 4 ++-- net/sctp/associola.c | 3 ++- net/sctp/sm_make_chunk.c | 3 ++- net/sctp/stream.c | 33 +++++++++++---------------------- 4 files changed, 17 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index c8dbf410c4f5..5051317162df 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -377,8 +377,8 @@ typedef struct sctp_sender_hb_info { __u64 hb_nonce; } sctp_sender_hb_info_t; -int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp); -int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp); +int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, + gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); void sctp_stream_clear(struct sctp_stream *stream); void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 6625b15ab81a..288c5e0cda5d 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -246,7 +246,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; - if (sctp_stream_new(asoc, gfp)) + if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, + 0, gfp)) goto fail_init; /* Assume that peer would support both address types unless we are diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 244181413bca..bd439edf2d8a 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2454,7 +2454,8 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, * stream sequence number shall be set to 0. */ - if (sctp_stream_init(asoc, gfp)) + if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, + asoc->c.sinit_max_instreams, gfp)) goto clean_up; if (!asoc->temp && sctp_assoc_set_id(asoc, gfp)) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index af6b49850344..82e6d40052a8 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -35,47 +35,36 @@ #include #include -int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp) +int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, + gfp_t gfp) { - struct sctp_stream *stream = &asoc->stream; - int i; - - stream->outcnt = asoc->c.sinit_num_ostreams; - stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); - if (!stream->out) - return -ENOMEM; - - for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; - - return 0; -} - -int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) -{ - struct sctp_stream *stream = &asoc->stream; int i; /* Initial stream->out size may be very big, so free it and alloc * a new one with new outcnt to save memory. 
*/ kfree(stream->out); - stream->outcnt = asoc->c.sinit_num_ostreams; - stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); + + stream->out = kcalloc(outcnt, sizeof(*stream->out), gfp); if (!stream->out) return -ENOMEM; + stream->outcnt = outcnt; for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; - stream->incnt = asoc->c.sinit_max_instreams; - stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); + if (!incnt) + return 0; + + stream->in = kcalloc(incnt, sizeof(*stream->in), gfp); if (!stream->in) { kfree(stream->out); stream->out = NULL; return -ENOMEM; } + stream->incnt = incnt; + return 0; } -- cgit v1.2.3 From 1bde33e051233f0ed93a8bc67137016ab38c3d2d Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Fri, 2 Jun 2017 14:46:13 -0700 Subject: include/linux/gfp.h: fix ___GFP_NOLOCKDEP value Igor Stoppa has noticed that __GFP_NOLOCKDEP can use a lower bit. At the time commit 7e7844226f10 ("lockdep: allow to disable reclaim lockup detection") was written we still had __GFP_OTHER_NODE but I have removed it in commit 41b6167e8f74 ("mm: get rid of __GFP_OTHER_NODE") and forgot to lower the bit value. The current value is outside of __GFP_BITS_SHIFT so it cannot be used actually. Fixes: 7e7844226f10 ("lockdep: allow to disable reclaim lockup detection") Signed-off-by: Michal Hocko Reported-by: Igor Stoppa Acked-by: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2b1a44f5bdb6..a89d37e8b387 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -41,7 +41,7 @@ struct vm_area_struct; #define ___GFP_WRITE 0x800000u #define ___GFP_KSWAPD_RECLAIM 0x1000000u #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x4000000u +#define ___GFP_NOLOCKDEP 0x2000000u #else #define ___GFP_NOLOCKDEP 0 #endif -- cgit v1.2.3 From 60b0a8c3d2480f3b57282b47b7cae7ee71c48635 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Fri, 2 Jun 2017 14:46:16 -0700 Subject: frv: declare jiffies to be located in the .data section Commit 7c30f352c852 ("jiffies.h: declare jiffies and jiffies_64 with ____cacheline_aligned_in_smp") removed a section specification from the jiffies declaration that caused conflicts on some platforms. Unfortunately this change broke the build for frv: kernel/built-in.o: In function `__do_softirq': (.text+0x6460): relocation truncated to fit: R_FRV_GPREL12 against symbol `jiffies' defined in *ABS* section in .tmp_vmlinux1 kernel/built-in.o: In function `__do_softirq': (.text+0x6574): relocation truncated to fit: R_FRV_GPREL12 against symbol `jiffies' defined in *ABS* section in .tmp_vmlinux1 kernel/built-in.o: In function `pwq_activate_delayed_work': workqueue.c:(.text+0x15b9c): relocation truncated to fit: R_FRV_GPREL12 against symbol `jiffies' defined in *ABS* section in .tmp_vmlinux1 ... Add __jiffy_arch_data to the declaration of jiffies and use it on frv to include the section specification. For all other platforms __jiffy_arch_data (currently) has no effect. 
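A purely illustrative expansion (not text from the patch) of what the combined #ifndef fallback and the frv override mean for the declaration: on frv, where arch/frv/include/asm/timex.h defines __jiffy_arch_data, the extern ends up carrying an explicit .data section so the GPREL12 relocations stay in range, while every other architecture sees the attribute expand to nothing:

	/* frv: __jiffy_arch_data expands to the section attribute */
	extern unsigned long volatile __cacheline_aligned_in_smp
		__attribute__((__section__(".data"))) jiffies;

	/* all other architectures: __jiffy_arch_data expands to nothing */
	extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
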
Fixes: 7c30f352c852 ("jiffies.h: declare jiffies and jiffies_64 with ____cacheline_aligned_in_smp") Link: http://lkml.kernel.org/r/20170516221333.177280-1-mka@chromium.org Signed-off-by: Matthias Kaehlcke Reported-by: Guenter Roeck Tested-by: Guenter Roeck Reviewed-by: David Howells Cc: Sudip Mukherjee Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/include/asm/timex.h | 6 ++++++ include/linux/jiffies.h | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h index a89bddefdacf..139093fab326 100644 --- a/arch/frv/include/asm/timex.h +++ b/arch/frv/include/asm/timex.h @@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void) #define vxtime_lock() do {} while (0) #define vxtime_unlock() do {} while (0) +/* This attribute is used in include/linux/jiffies.h alongside with + * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp + * for frv does not contain another section specification. + */ +#define __jiffy_arch_data __attribute__((__section__(".data"))) + #endif diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 36872fbb815d..734377ad42e9 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) +#ifndef __jiffy_arch_data +#define __jiffy_arch_data +#endif + /* * The 64-bit value is not atomic - you MUST NOT read it * without sampling the sequence number in jiffies_lock. * get_jiffies_64() will do this for you as appropriate. */ extern u64 __cacheline_aligned_in_smp jiffies_64; -extern unsigned long volatile __cacheline_aligned_in_smp jiffies; +extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void); -- cgit v1.2.3 From 9a291a7c9428155e8e623e4a3989f8be47134df5 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 2 Jun 2017 14:46:46 -0700 Subject: mm/hugetlb: report -EHWPOISON not -EFAULT when FOLL_HWPOISON is specified KVM uses get_user_pages() to resolve its stage2 faults. KVM sets the FOLL_HWPOISON flag causing faultin_page() to return -EHWPOISON when it finds a VM_FAULT_HWPOISON. KVM handles these hwpoison pages as a special case. (check_user_page_hwpoison()) When huge pages are involved, this doesn't work so well. get_user_pages() calls follow_hugetlb_page(), which stops early if it receives VM_FAULT_HWPOISON from hugetlb_fault(), eventually returning -EFAULT to the caller. The step to map this to -EHWPOISON based on the FOLL_ flags is missing. The hwpoison special case is skipped, and -EFAULT is returned to user-space, causing Qemu or kvmtool to exit. Instead, move this VM_FAULT_ to errno mapping code into a header file and use it from faultin_page() and follow_hugetlb_page(). With this, KVM works as expected. This isn't a problem for arm64 today as we haven't enabled MEMORY_FAILURE, but I can't see any reason this doesn't happen on x86 too, so I think this should be a fix. This doesn't apply earlier than stable's v4.11.1 due to all sorts of cleanup. [james.morse@arm.com: add vm_fault_to_errno() call to faultin_page()] suggested. 
Link: http://lkml.kernel.org/r/20170525171035.16359-1-james.morse@arm.com [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20170524160900.28786-1-james.morse@arm.com Signed-off-by: James Morse Acked-by: Punit Agrawal Acked-by: Naoya Horiguchi Cc: "Kirill A . Shutemov" Cc: [4.11.1+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 11 +++++++++++ mm/gup.c | 20 ++++++++------------ mm/hugetlb.c | 5 +++++ 3 files changed, 24 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 7cb17c6b97de..b892e95d4929 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ #define FOLL_COW 0x4000 /* internal GUP flag */ +static inline int vm_fault_to_errno(int vm_fault, int foll_flags) +{ + if (vm_fault & VM_FAULT_OOM) + return -ENOMEM; + if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) + return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; + if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) + return -EFAULT; + return 0; +} + typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, diff --git a/mm/gup.c b/mm/gup.c index d9e6fddcc51f..b3c7214d710d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -407,12 +407,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { - if (ret & VM_FAULT_OOM) - return -ENOMEM; - if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) - return *flags & FOLL_HWPOISON ? 
-EHWPOISON : -EFAULT; - if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) - return -EFAULT; + int err = vm_fault_to_errno(ret, *flags); + + if (err) + return err; BUG(); } @@ -723,12 +721,10 @@ retry: ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { - if (ret & VM_FAULT_OOM) - return -ENOMEM; - if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) - return -EHWPOISON; - if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) - return -EFAULT; + int err = vm_fault_to_errno(ret, 0); + + if (err) + return err; BUG(); } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e5828875f7bb..3eedb187e549 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, } ret = hugetlb_fault(mm, vma, vaddr, fault_flags); if (ret & VM_FAULT_ERROR) { + int err = vm_fault_to_errno(ret, flags); + + if (err) + return err; + remainder = 0; break; } -- cgit v1.2.3 From 864b9a393dcb5aed09b8fd31b9bbda0fdda99374 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Fri, 2 Jun 2017 14:46:49 -0700 Subject: mm: consider memblock reservations for deferred memory initialization sizing We have seen an early OOM killer invocation on ppc64 systems with crashkernel=4096M: kthreadd invoked oom-killer: gfp_mask=0x16040c0(GFP_KERNEL|__GFP_COMP|__GFP_NOTRACK), nodemask=7, order=0, oom_score_adj=0 kthreadd cpuset=/ mems_allowed=7 CPU: 0 PID: 2 Comm: kthreadd Not tainted 4.4.68-1.gd7fe927-default #1 Call Trace: dump_stack+0xb0/0xf0 (unreliable) dump_header+0xb0/0x258 out_of_memory+0x5f0/0x640 __alloc_pages_nodemask+0xa8c/0xc80 kmem_getpages+0x84/0x1a0 fallback_alloc+0x2a4/0x320 kmem_cache_alloc_node+0xc0/0x2e0 copy_process.isra.25+0x260/0x1b30 _do_fork+0x94/0x470 kernel_thread+0x48/0x60 kthreadd+0x264/0x330 ret_from_kernel_thread+0x5c/0xa4 Mem-Info: active_anon:0 inactive_anon:0 isolated_anon:0 active_file:0 inactive_file:0 isolated_file:0 unevictable:0 dirty:0 writeback:0 unstable:0 slab_reclaimable:5 slab_unreclaimable:73 mapped:0 shmem:0 pagetables:0 bounce:0 free:0 free_pcp:0 free_cma:0 Node 7 DMA free:0kB min:0kB low:0kB high:0kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:52428800kB managed:110016kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:320kB slab_unreclaimable:4672kB kernel_stack:1152kB pagetables:0kB unstable:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? yes lowmem_reserve[]: 0 0 0 0 Node 7 DMA: 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB 0*8192kB 0*16384kB = 0kB 0 total pagecache pages 0 pages in swap cache Swap cache stats: add 0, delete 0, find 0/0 Free swap = 0kB Total swap = 0kB 819200 pages RAM 0 pages HighMem/MovableOnly 817481 pages reserved 0 pages cma reserved 0 pages hwpoisoned The reason is that the managed memory is too low (only 110MB) while the rest of the 50GB is still waiting for the deferred initialization to be done. update_defer_init estimates the initial memory to initialize to 2GB at least but it doesn't consider any memory allocated in that range. In this particular case we've had Reserving 4096MB of memory at 128MB for crashkernel (System RAM: 51200MB) so the low 2GB is mostly depleted. Fix this by considering memblock allocations in the initial static initialization estimation.
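In outline, the fix totals the memblock reservations that fall inside the low window we intend to initialise statically and grows the estimate by that amount. A simplified sketch of the idea (reserved_in_window is a hypothetical name; unlike the memblock_reserved_memory_within helper added below, this variant clamps each reservation to the window rather than counting whole regions that merely touch it):

	/* Sum reservations (e.g. crashkernel) overlapping [start, end) so
	 * the deferred-init estimate still covers enough memory to boot. */
	static unsigned long reserved_in_window(phys_addr_t start, phys_addr_t end)
	{
		struct memblock_region *rgn;
		unsigned long bytes = 0;

		for_each_memblock_type((&memblock.reserved), rgn) {
			phys_addr_t r_start = max(rgn->base, start);
			phys_addr_t r_end = min(rgn->base + rgn->size, end);

			if (r_start < r_end)	/* reservation overlaps window */
				bytes += r_end - r_start;
		}
		return bytes;
	}
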
Move the max_initialise to reset_deferred_meminit and implement a simple memblock_reserved_memory helper which iterates all reserved blocks and sums the size of all that start below the given address. The cumulative size is then added on top of the initial estimation. This is still not ideal because reset_deferred_meminit doesn't consider holes and so reservation might be above the initial estimation which we ignore, but let's make the logic simpler until we really need to handle more complicated cases. Fixes: 3a80a7fa7989 ("mm: meminit: initialise a subset of struct pages if CONFIG_DEFERRED_STRUCT_PAGE_INIT is set") Link: http://lkml.kernel.org/r/20170531104010.GI27783@dhcp22.suse.cz Signed-off-by: Michal Hocko Acked-by: Mel Gorman Tested-by: Srikar Dronamraju Cc: [4.2+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 8 ++++++++ include/linux/mmzone.h | 1 + mm/memblock.c | 23 +++++++++++++++++++++++ mm/page_alloc.c | 33 ++++++++++++++++++++++----------- 4 files changed, 54 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 4ce24a376262..8098695e5d8d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) } #endif +extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, + phys_addr_t end_addr); #else static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) { return 0; } +static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, + phys_addr_t end_addr) +{ + return 0; +} + #endif /* CONFIG_HAVE_MEMBLOCK */ #endif /* __KERNEL__ */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ebaccd4e7d8c..ef6a13b7bd3e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -678,6 +678,7 @@ typedef struct pglist_data { * is the first PFN that needs to be initialised. */ unsigned long first_deferred_pfn; + unsigned long static_init_size; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/mm/memblock.c b/mm/memblock.c index b049c9b2dba8..7b8a5db76a2f 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type) } } +extern unsigned long __init_memblock +memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr) +{ + struct memblock_region *rgn; + unsigned long size = 0; + int idx; + + for_each_memblock_type((&memblock.reserved), rgn) { + phys_addr_t start, end; + + if (rgn->base + rgn->size < start_addr) + continue; + if (rgn->base > end_addr) + continue; + + start = rgn->base; + end = start + rgn->size; + size += end - start; + } + + return size; +} + void __init_memblock __memblock_dump_all(void) { pr_info("MEMBLOCK configuration:\n"); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b7a6f583a373..2302f250d6b1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static inline void reset_deferred_meminit(pg_data_t *pgdat) { + unsigned long max_initialise; + unsigned long reserved_lowmem; + + /* + * Initialise at least 2G of a node but also take into account that + * two large system hashes that can take up 1GB for 0.25TB/node.
+ */ + max_initialise = max(2UL << (30 - PAGE_SHIFT), + (pgdat->node_spanned_pages >> 8)); + + /* + * Compensate the all the memblock reservations (e.g. crash kernel) + * from the initial estimation to make sure we will initialize enough + * memory to boot. + */ + reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, + pgdat->node_start_pfn + max_initialise); + max_initialise += reserved_lowmem; + + pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); pgdat->first_deferred_pfn = ULONG_MAX; } @@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat, unsigned long pfn, unsigned long zone_end, unsigned long *nr_initialised) { - unsigned long max_initialise; - /* Always populate low zones for address-contrained allocations */ if (zone_end < pgdat_end_pfn(pgdat)) return true; - /* - * Initialise at least 2G of a node but also take into account that - * two large system hashes that can take up 1GB for 0.25TB/node. - */ - max_initialise = max(2UL << (30 - PAGE_SHIFT), - (pgdat->node_spanned_pages >> 8)); - (*nr_initialised)++; - if ((*nr_initialised > max_initialise) && + if ((*nr_initialised > pgdat->static_init_size) && (pfn & (PAGES_PER_SECTION - 1)) == 0) { pgdat->first_deferred_pfn = pfn; return false; @@ -6138,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, /* pg_data_t should be reset to zero when it's allocated */ WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); - reset_deferred_meminit(pgdat); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; pgdat->per_cpu_nodestats = NULL; @@ -6160,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, (unsigned long)pgdat->node_mem_map); #endif + reset_deferred_meminit(pgdat); free_area_init_core(pgdat); } -- cgit v1.2.3 From e94c32818d865b620c9bbd2656ab3199259ef9ec Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sun, 28 May 2017 05:58:04 -0300 Subject: [media] cec: rename MEDIA_CEC_NOTIFIER to CEC_NOTIFIER This config option is strictly speaking independent of the media subsystem since it can be used by drm as well. Besides, it looks odd when drivers select CEC_CORE and MEDIA_CEC_NOTIFIER, that's inconsistent naming. 
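The consumer-visible effect is only in how the guards read; as a sketch (the stub body here is an assumption of this illustration, not patch text), the cec-notifier.h pattern touched below makes the real prototypes visible only when the core is reachable and the notifier is enabled, and falls back to inert inline stubs otherwise:

	#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
	struct cec_notifier *cec_notifier_get(struct device *dev);
	#else
	static inline struct cec_notifier *cec_notifier_get(struct device *dev)
	{
		return NULL;	/* assumed stub behaviour for this sketch */
	}
	#endif
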
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/Kconfig | 2 +- drivers/media/cec/Makefile | 2 +- drivers/media/cec/cec-core.c | 4 ++-- drivers/media/platform/Kconfig | 4 ++-- include/media/cec-notifier.h | 2 +- include/media/cec.h | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 9ec634e2f2ba..55d9c2b82b7e 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -5,7 +5,7 @@ config CEC_CORE tristate -config MEDIA_CEC_NOTIFIER +config CEC_NOTIFIER bool menuconfig MEDIA_SUPPORT diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile index 402a6c62a3e8..eaf408e64669 100644 --- a/drivers/media/cec/Makefile +++ b/drivers/media/cec/Makefile @@ -1,6 +1,6 @@ cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o -ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) +ifeq ($(CONFIG_CEC_NOTIFIER),y) cec-objs += cec-notifier.o endif diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c index f9ebff90f8eb..feeb4c5afa69 100644 --- a/drivers/media/cec/cec-core.c +++ b/drivers/media/cec/cec-core.c @@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode) put_device(&devnode->dev); } -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#ifdef CONFIG_CEC_NOTIFIER static void cec_cec_notify(struct cec_adapter *adap, u16 pa) { cec_s_phys_addr(adap, pa, false); @@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap) adap->rc = NULL; #endif debugfs_remove_recursive(adap->cec_dir); -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#ifdef CONFIG_CEC_NOTIFIER if (adap->notifier) cec_notifier_unregister(adap->notifier); #endif diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 017419bef9b1..041cb80a26b1 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -503,7 +503,7 @@ config VIDEO_SAMSUNG_S5P_CEC tristate "Samsung S5P CEC driver" depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST select CEC_CORE - select MEDIA_CEC_NOTIFIER + select CEC_NOTIFIER ---help--- This is a driver for Samsung S5P HDMI CEC interface. It uses the generic CEC framework interface. @@ -514,7 +514,7 @@ config VIDEO_STI_HDMI_CEC tristate "STMicroelectronics STiH4xx HDMI CEC driver" depends on ARCH_STI || COMPILE_TEST select CEC_CORE - select MEDIA_CEC_NOTIFIER + select CEC_NOTIFIER ---help--- This is a driver for STIH4xx HDMI CEC interface. It uses the generic CEC framework interface. diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index eb50ce54b759..413335c8cb52 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h @@ -29,7 +29,7 @@ struct edid; struct cec_adapter; struct cec_notifier; -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) /** * cec_notifier_get - find or create a new cec_notifier for the given device. 
diff --git a/include/media/cec.h b/include/media/cec.h index b8eb895731d5..bfa88d4d67e1 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -173,7 +173,7 @@ struct cec_adapter { bool passthrough; struct cec_log_addrs log_addrs; -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#ifdef CONFIG_CEC_NOTIFIER struct cec_notifier *notifier; #endif @@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input); */ int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#ifdef CONFIG_CEC_NOTIFIER void cec_register_cec_notifier(struct cec_adapter *adap, struct cec_notifier *notifier); #endif -- cgit v1.2.3 From 518d8a2e9bad83c6040eccebc3d1f7388fc034e7 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 1 Jun 2017 21:37:37 +0300 Subject: net/flow_dissector: add support for dissection of misc ip header fields Add support for dissection of ip tos and ttl and ipv6 traffic-class and hoplimit. Both are dissected into the same struct. Uses similar call to ip dissection function as with tcp, arp and others. Signed-off-by: Or Gerlitz Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/flow_dissector.h | 11 +++++++++++ net/core/flow_dissector.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) (limited to 'include') diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index efe34eec61dc..e2663e900b0a 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -165,6 +165,16 @@ struct flow_dissector_key_tcp { __be16 flags; }; +/** + * struct flow_dissector_key_ip: + * @tos: tos + * @ttl: ttl + */ +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -186,6 +196,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ + FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5a45943081f5..fc5fc4594c90 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -367,6 +367,40 @@ __skb_flow_dissect_tcp(const struct sk_buff *skb, key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF)); } +static void +__skb_flow_dissect_ipv4(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, void *data, const struct iphdr *iph) +{ + struct flow_dissector_key_ip *key_ip; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP)) + return; + + key_ip = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IP, + target_container); + key_ip->tos = iph->tos; + key_ip->ttl = iph->ttl; +} + +static void +__skb_flow_dissect_ipv6(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, void *data, const struct ipv6hdr *iph) +{ + struct flow_dissector_key_ip *key_ip; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP)) + return; + + key_ip = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IP, + target_container); + key_ip->tos = ipv6_get_dsfield(iph); + key_ip->ttl = iph->hop_limit; +} + /** * __skb_flow_dissect - extract the flow_keys struct and return it * @skb: sk_buff to extract the flow from, can be NULL if the rest 
are specified @@ -469,6 +503,9 @@ ip: } } + __skb_flow_dissect_ipv4(skb, flow_dissector, + target_container, data, iph); + if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) goto out_good; @@ -514,6 +551,9 @@ ipv6: goto out_good; } + __skb_flow_dissect_ipv6(skb, flow_dissector, + target_container, data, iph); + if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) goto out_good; -- cgit v1.2.3 From 4d80cc0aaaab9efac14c9d3d702b69961800de20 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 1 Jun 2017 21:37:38 +0300 Subject: net/sched: cls_flower: add support for matching on ip tos and ttl Benefit from the support of ip header fields dissection and allow users to set rules matching on ipv4 tos and ttl or ipv6 traffic-class and hoplimit. Signed-off-by: Or Gerlitz Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 5 +++++ net/sched/cls_flower.c | 39 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 42 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index c6e8cf5e9c40..edf43ddf47b0 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -454,6 +454,11 @@ enum { TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */ TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */ + TCA_FLOWER_KEY_IP_TOS, /* u8 */ + TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */ + TCA_FLOWER_KEY_IP_TTL, /* u8 */ + TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index fb74a47830f4..33feaee197cf 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -50,6 +50,7 @@ struct fl_flow_key { struct flow_dissector_key_ports enc_tp; struct flow_dissector_key_mpls mpls; struct flow_dissector_key_tcp tcp; + struct flow_dissector_key_ip ip; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ struct fl_flow_mask_range { @@ -427,6 +428,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -528,6 +533,19 @@ static int fl_set_key_flags(struct nlattr **tb, return 0; } +static void fl_set_key_ip(struct nlattr **tb, + struct flow_dissector_key_ip *key, + struct flow_dissector_key_ip *mask) +{ + fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS, + &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK, + sizeof(key->tos)); + + fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, + &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK, + sizeof(key->ttl)); +} + static int fl_set_key(struct net *net, struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask) { @@ -570,6 +588,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)); + fl_set_key_ip(tb, &key->ip, &mask->ip); } if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { @@ -772,6 +791,8 @@ static void fl_init_dissector(struct cls_fl_head *head, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_IP, ip); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_TCP, tcp); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, @@ -1082,6 +1103,19 @@ static int fl_dump_key_mpls(struct sk_buff *skb, return 0; } +static int fl_dump_key_ip(struct sk_buff *skb, + struct flow_dissector_key_ip *key, + struct flow_dissector_key_ip *mask) +{ + if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos, + TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) || + fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl, + TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl))) + return -1; + + return 0; +} + static int fl_dump_key_vlan(struct sk_buff *skb, struct flow_dissector_key_vlan *vlan_key, struct flow_dissector_key_vlan *vlan_mask) @@ -1195,9 +1229,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if ((key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) && - fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, + (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, - sizeof(key->basic.ip_proto))) + sizeof(key->basic.ip_proto)) || + fl_dump_key_ip(skb, &key->ip, &mask->ip))) goto nla_put_failure; if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && -- cgit v1.2.3 From 14be36c2c96cd18cfa036f230b57ea78d82a303f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 2 Jun 2017 12:31:23 -0700 Subject: net: dsa: Initialize all CPU and enabled ports masks in dsa_ds_parse() There was no reason for duplicating the code that initializes ds->enabled_port_mask in both dsa_parse_ports_dn() and dsa_parse_ports(), instead move this to dsa_ds_parse() which is early enough before ops->setup() has run. 
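With both masks populated before ops->setup() runs, a driver's setup hook can classify ports with plain bit tests; a sketch (hypothetical_setup, setup_cpu_port and setup_user_port are made-up driver helpers, while the mask fields are the ones initialised in the hunks below):

	static int hypothetical_setup(struct dsa_switch *ds)
	{
		int port;

		for (port = 0; port < ds->num_ports; port++) {
			if (ds->cpu_port_mask & BIT(port))		/* CPU-facing */
				setup_cpu_port(ds, port);
			else if (ds->enabled_port_mask & BIT(port))	/* user port */
				setup_user_port(ds, port);
		}
		return 0;
	}
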
While at it, we can now make dsa_is_cpu_port() check ds->cpu_port_mask which is a step towards being multi-CPU port capable. Signed-off-by: Florian Fainelli Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 2 +- net/dsa/dsa2.c | 33 +++++++++++++++------------------ 2 files changed, 16 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 448d8bc77707..2effb0af9d7c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -254,7 +254,7 @@ struct dsa_switch { static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) { - return ds->dst->cpu_dp == &ds->ports[p]; + return !!(ds->cpu_port_mask & (1 << p)); } static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 067daec644c1..cd13bb54a30c 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -250,8 +250,6 @@ static int dsa_cpu_port_apply(struct dsa_port *port) return err; } - ds->cpu_port_mask |= BIT(port->index); - memset(&port->devlink_port, 0, sizeof(port->devlink_port)); err = devlink_port_register(ds->devlink, &port->devlink_port, port->index); @@ -522,6 +520,12 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index, dst->rcv = dst->tag_ops->rcv; + /* Initialize cpu_port_mask now for drv->setup() + * to have access to a correct value, just like what + * net/dsa/dsa.c::dsa_switch_setup_one does. + */ + ds->cpu_port_mask |= BIT(index); + return 0; } @@ -533,14 +537,22 @@ static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds) for (index = 0; index < ds->num_ports; index++) { port = &ds->ports[index]; - if (!dsa_port_is_valid(port)) + if (!dsa_port_is_valid(port) || + dsa_port_is_dsa(port)) continue; if (dsa_port_is_cpu(port)) { err = dsa_cpu_parse(port, index, dst, ds); if (err) return err; + } else { + /* Initialize enabled_port_mask now for drv->setup() + * to have access to a correct value, just like what + * net/dsa/dsa.c::dsa_switch_setup_one does. + */ + ds->enabled_port_mask |= BIT(index); } + } pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index); @@ -589,13 +601,6 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds) return -EINVAL; ds->ports[reg].dn = port; - - /* Initialize enabled_port_mask now for ops->setup() - * to have access to a correct value, just like what - * net/dsa/dsa.c::dsa_switch_setup_one does. - */ - if (!dsa_port_is_cpu(&ds->ports[reg])) - ds->enabled_port_mask |= 1 << reg; } return 0; @@ -611,14 +616,6 @@ static int dsa_parse_ports(struct dsa_chip_data *cd, struct dsa_switch *ds) continue; ds->ports[i].name = cd->port_names[i]; - - /* Initialize enabled_port_mask now for drv->setup() - * to have access to a correct value, just like what - * net/dsa/dsa.c::dsa_switch_setup_one does. - */ - if (!dsa_port_is_cpu(&ds->ports[i])) - ds->enabled_port_mask |= 1 << i; - valid_name_found = true; } -- cgit v1.2.3 From 5071034e4af709d6783b7d105dc296a5cc84739b Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Fri, 2 Jun 2017 09:01:49 -0700 Subject: neigh: Really delete an arp/neigh entry on "ip neigh delete" or "arp -d" The command # arp -s 62.2.0.1 a:b:c:d:e:f dev eth2 adds an entry like the following (listed by "arp -an") ? (62.2.0.1) at 0a:0b:0c:0d:0e:0f [ether] PERM on eth2 but the symmetric deletion command # arp -i eth2 -d 62.2.0.1 does not remove the PERM entry from the table, and instead leaves behind ? 
(62.2.0.1) at on eth2 The reason is that there is a refcnt of 1 for the arp_tbl itself (neigh_alloc starts off the entry with a refcnt of 1), thus the neigh_release() call from arp_invalidate() will (at best) just decrement the ref to 1, but will never actually free it from the table. To fix this, we need to do something like neigh_forced_gc: if the refcnt is 1 (i.e., on the table's ref), remove the entry from the table and free it. This patch refactors and shares common code between neigh_forced_gc and the newly added neigh_remove_one. A similar issue exists for IPv6 Neighbor Cache entries, and is fixed in a similar manner by this patch. Signed-off-by: Sowmini Varadhan Reviewed-by: Julian Anastasov Signed-off-by: David S. Miller --- include/net/neighbour.h | 1 + net/core/neighbour.c | 60 ++++++++++++++++++++++++++++++++++++++++--------- net/ipv4/arp.c | 4 ++++ 3 files changed, 54 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/neighbour.h b/include/net/neighbour.h index e4dd3a214034..639b67564a7d 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -317,6 +317,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u32 nlmsg_pid); void __neigh_set_probe_once(struct neighbour *neigh); +bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index d274f81fcc2c..dadb5eef91c3 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -118,6 +118,50 @@ unsigned long neigh_rand_reach_time(unsigned long base) EXPORT_SYMBOL(neigh_rand_reach_time); +static bool neigh_del(struct neighbour *n, __u8 state, + struct neighbour __rcu **np, struct neigh_table *tbl) +{ + bool retval = false; + + write_lock(&n->lock); + if (atomic_read(&n->refcnt) == 1 && !(n->nud_state & state)) { + struct neighbour *neigh; + + neigh = rcu_dereference_protected(n->next, + lockdep_is_held(&tbl->lock)); + rcu_assign_pointer(*np, neigh); + n->dead = 1; + retval = true; + } + write_unlock(&n->lock); + if (retval) + neigh_cleanup_and_release(n); + return retval; +} + +bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl) +{ + struct neigh_hash_table *nht; + void *pkey = ndel->primary_key; + u32 hash_val; + struct neighbour *n; + struct neighbour __rcu **np; + + nht = rcu_dereference_protected(tbl->nht, + lockdep_is_held(&tbl->lock)); + hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd); + hash_val = hash_val >> (32 - nht->hash_shift); + + np = &nht->hash_buckets[hash_val]; + while ((n = rcu_dereference_protected(*np, + lockdep_is_held(&tbl->lock)))) { + if (n == ndel) + return neigh_del(n, 0, np, tbl); + np = &n->next; + } + return false; +} + static int neigh_forced_gc(struct neigh_table *tbl) { int shrunk = 0; @@ -140,19 +184,10 @@ static int neigh_forced_gc(struct neigh_table *tbl) * - nobody refers to it. 
* - it is not permanent */ - write_lock(&n->lock); - if (atomic_read(&n->refcnt) == 1 && - !(n->nud_state & NUD_PERMANENT)) { - rcu_assign_pointer(*np, - rcu_dereference_protected(n->next, - lockdep_is_held(&tbl->lock))); - n->dead = 1; - shrunk = 1; - write_unlock(&n->lock); - neigh_cleanup_and_release(n); + if (neigh_del(n, NUD_PERMANENT, np, tbl)) { + shrunk = 1; continue; } - write_unlock(&n->lock); np = &n->next; } } @@ -1649,7 +1684,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, NETLINK_CB(skb).portid); + write_lock_bh(&tbl->lock); neigh_release(neigh); + neigh_remove_one(neigh, tbl); + write_unlock_bh(&tbl->lock); out: return err; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index e9f3386a528b..a651c53260ec 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1113,13 +1113,17 @@ static int arp_invalidate(struct net_device *dev, __be32 ip) { struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); int err = -ENXIO; + struct neigh_table *tbl = &arp_tbl; if (neigh) { if (neigh->nud_state & ~NUD_NOARP) err = neigh_update(neigh, NULL, NUD_FAILED, NEIGH_UPDATE_F_OVERRIDE| NEIGH_UPDATE_F_ADMIN, 0); + write_lock_bh(&tbl->lock); neigh_release(neigh); + neigh_remove_one(neigh, tbl); + write_unlock_bh(&tbl->lock); } return err; -- cgit v1.2.3 From f91840a32deef5cb1bf73338bc5010f843b01426 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 2 Jun 2017 21:03:52 -0700 Subject: perf, bpf: Add BPF support to all perf_event types Allow BPF_PROG_TYPE_PERF_EVENT program types to attach to all perf_event types, including HW_CACHE, RAW, and dynamic pmu events. Only tracepoint/kprobe events are treated differently which require BPF_PROG_TYPE_TRACEPOINT/BPF_PROG_TYPE_KPROBE program types accordingly. Also add support for reading all event counters using bpf_perf_event_read() helper. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- include/linux/perf_event.h | 7 +++++-- kernel/bpf/arraymap.c | 28 +++++++-------------------- kernel/events/core.c | 47 +++++++++++++++++++++++++++------------------- kernel/trace/bpf_trace.c | 22 ++++++++-------------- 4 files changed, 48 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 24a635887f28..8fc5f0fada5e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -896,7 +896,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); -extern u64 perf_event_read_local(struct perf_event *event); +int perf_event_read_local(struct perf_event *event, u64 *value); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1301,7 +1301,10 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event * { return ERR_PTR(-EINVAL); } -static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } +static inline int perf_event_read_local(struct perf_event *event, u64 *value) +{ + return -EINVAL; +} static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 172dc8ee0e3b..ecb43542246e 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -452,38 +452,24 @@ static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee) static void *perf_event_fd_array_get_ptr(struct bpf_map *map, struct file *map_file, int fd) { - const struct perf_event_attr *attr; struct bpf_event_entry *ee; struct perf_event *event; struct file *perf_file; + u64 value; perf_file = perf_event_get(fd); if (IS_ERR(perf_file)) return perf_file; + ee = ERR_PTR(-EOPNOTSUPP); event = perf_file->private_data; - ee = ERR_PTR(-EINVAL); - - attr = perf_event_attrs(event); - if (IS_ERR(attr) || attr->inherit) + if (perf_event_read_local(event, &value) == -EOPNOTSUPP) goto err_out; - switch (attr->type) { - case PERF_TYPE_SOFTWARE: - if (attr->config != PERF_COUNT_SW_BPF_OUTPUT) - goto err_out; - /* fall-through */ - case PERF_TYPE_RAW: - case PERF_TYPE_HARDWARE: - ee = bpf_event_entry_gen(perf_file, map_file); - if (ee) - return ee; - ee = ERR_PTR(-ENOMEM); - /* fall-through */ - default: - break; - } - + ee = bpf_event_entry_gen(perf_file, map_file); + if (ee) + return ee; + ee = ERR_PTR(-ENOMEM); err_out: fput(perf_file); return ee; diff --git a/kernel/events/core.c b/kernel/events/core.c index 6e75a5c9412d..51e40e4876c0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3636,10 +3636,10 @@ static inline u64 perf_event_count(struct perf_event *event) * will not be local and we cannot read them atomically * - must not have a pmu::count method */ -u64 perf_event_read_local(struct perf_event *event) +int perf_event_read_local(struct perf_event *event, u64 *value) { unsigned long flags; - u64 val; + int ret = 0; /* * Disabling interrupts avoids all counter scheduling (context @@ -3647,25 +3647,37 @@ u64 perf_event_read_local(struct perf_event *event) */ local_irq_save(flags); - /* If this is a per-task event, it must be for current */ - WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && - event->hw.target != current); - - /* If this is a per-CPU event, it must be for this CPU */ - WARN_ON_ONCE(!(event->attach_state & 
PERF_ATTACH_TASK) && - event->cpu != smp_processor_id()); - /* * It must not be an event with inherit set, we cannot read * all child counters from atomic context. */ - WARN_ON_ONCE(event->attr.inherit); + if (event->attr.inherit) { + ret = -EOPNOTSUPP; + goto out; + } /* * It must not have a pmu::count method, those are not * NMI safe. */ - WARN_ON_ONCE(event->pmu->count); + if (event->pmu->count) { + ret = -EOPNOTSUPP; + goto out; + } + + /* If this is a per-task event, it must be for current */ + if ((event->attach_state & PERF_ATTACH_TASK) && + event->hw.target != current) { + ret = -EINVAL; + goto out; + } + + /* If this is a per-CPU event, it must be for this CPU */ + if (!(event->attach_state & PERF_ATTACH_TASK) && + event->cpu != smp_processor_id()) { + ret = -EINVAL; + goto out; + } /* * If the event is currently on this CPU, its either a per-task event, @@ -3675,10 +3687,11 @@ u64 perf_event_read_local(struct perf_event *event) if (event->oncpu == smp_processor_id()) event->pmu->read(event); - val = local64_read(&event->count); + *value = local64_read(&event->count); +out: local_irq_restore(flags); - return val; + return ret; } static int perf_event_read(struct perf_event *event, bool group) @@ -8037,12 +8050,8 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) bool is_kprobe, is_tracepoint; struct bpf_prog *prog; - if (event->attr.type == PERF_TYPE_HARDWARE || - event->attr.type == PERF_TYPE_SOFTWARE) - return perf_event_set_bpf_handler(event, prog_fd); - if (event->attr.type != PERF_TYPE_TRACEPOINT) - return -EINVAL; + return perf_event_set_bpf_handler(event, prog_fd); if (event->tp_event->prog) return -EEXIST; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 460a031c77e5..08eb072430b9 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -234,7 +234,8 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) unsigned int cpu = smp_processor_id(); u64 index = flags & BPF_F_INDEX_MASK; struct bpf_event_entry *ee; - struct perf_event *event; + u64 value = 0; + int err; if (unlikely(flags & ~(BPF_F_INDEX_MASK))) return -EINVAL; @@ -247,21 +248,14 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) if (!ee) return -ENOENT; - event = ee->event; - if (unlikely(event->attr.type != PERF_TYPE_HARDWARE && - event->attr.type != PERF_TYPE_RAW)) - return -EINVAL; - - /* make sure event is local and doesn't have pmu::count */ - if (unlikely(event->oncpu != cpu || event->pmu->count)) - return -EINVAL; - + err = perf_event_read_local(ee->event, &value); /* - * we don't know if the function is run successfully by the - * return value. It can be judged in other places, such as - * eBPF programs. + * this api is ugly since we miss [-22..-2] range of valid + * counter values, but that's uapi */ - return perf_event_read_local(event); + if (err) + return err; + return value; } static const struct bpf_func_proto bpf_perf_event_read_proto = { -- cgit v1.2.3 From b7d3ed5be9bd7e0689eee0f0f36702937cd8f7c8 Mon Sep 17 00:00:00 2001 From: Teng Qin Date: Fri, 2 Jun 2017 21:03:54 -0700 Subject: bpf: update perf event helper functions documentation This commit updates documentation of the bpf_perf_event_output and bpf_perf_event_read helpers to match their implementation. Signed-off-by: Teng Qin Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
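Miller

The contract described by the doc changes below is easiest to see from the program side. A minimal sketch in the samples/bpf style of the era, assuming the usual samples/bpf scaffolding (bpf_helpers.h); the map name, probe point and the -4096 error cutoff are illustrative assumptions, not part of this patch:

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") counters = {
    .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    .key_size = sizeof(int),
    .value_size = sizeof(u32),
    .max_entries = 64, /* should cover all possible CPUs */
};

SEC("kprobe/sys_write")
int count_writes(struct pt_regs *ctx)
{
    char fmt[] = "counter: %llu\n";
    /* flags carries the map index; BPF_F_CURRENT_CPU selects the
     * slot of the CPU the program is running on */
    u64 val = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

    /* Per the bpf_trace.c comment in the previous commit, error
     * codes share the u64 range with counter values, so small
     * negative values must be treated as errors (illustrative
     * cutoff) */
    if ((s64)val < 0 && (s64)val > -4096)
        return 0;

    bpf_trace_printk(fmt, sizeof(fmt), val);
    return 0;
}
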
--- include/uapi/linux/bpf.h | 11 +++++++---- tools/include/uapi/linux/bpf.h | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 94dfa9def355..e78aece03628 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -313,8 +313,11 @@ union bpf_attr { * @flags: room for future extensions * Return: 0 on success or negative error * - * u64 bpf_perf_event_read(&map, index) - * Return: Number events read or error code + * u64 bpf_perf_event_read(map, flags) + * read perf event counter value + * @map: pointer to perf_event_array map + * @flags: index of event in the map or bitmask flags + * Return: value of perf event counter read or error code * * int bpf_redirect(ifindex, flags) * redirect to another netdev @@ -328,11 +331,11 @@ union bpf_attr { * @skb: pointer to skb * Return: realm if != 0 * - * int bpf_perf_event_output(ctx, map, index, data, size) + * int bpf_perf_event_output(ctx, map, flags, data, size) * output perf raw sample * @ctx: struct pt_regs* * @map: pointer to perf_event_array map - * @index: index of event in the map + * @flags: index of event in the map or bitmask flags * @data: data on stack to be output as raw data * @size: size of data * Return: 0 on success or negative error diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 94dfa9def355..e78aece03628 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -313,8 +313,11 @@ union bpf_attr { * @flags: room for future extensions * Return: 0 on success or negative error * - * u64 bpf_perf_event_read(&map, index) - * Return: Number events read or error code + * u64 bpf_perf_event_read(map, flags) + * read perf event counter value + * @map: pointer to perf_event_array map + * @flags: index of event in the map or bitmask flags + * Return: value of perf event counter read or error code * * int bpf_redirect(ifindex, flags) * redirect to another netdev @@ -328,11 +331,11 @@ union bpf_attr { * @skb: pointer to skb * Return: realm if != 0 * - * int bpf_perf_event_output(ctx, map, index, data, size) + * int bpf_perf_event_output(ctx, map, flags, data, size) * output perf raw sample * @ctx: struct pt_regs* * @map: pointer to perf_event_array map - * @index: index of event in the map + * @flags: index of event in the map or bitmask flags * @data: data on stack to be output as raw data * @size: size of data * Return: 0 on success or negative error -- cgit v1.2.3 From 77d4b1d36926a9b8387c6b53eeba42bcaaffcea3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 3 Jun 2017 09:29:25 -0700 Subject: net: ping: do not abuse udp_poll() Alexander reported various KASAN messages triggered in recent kernels. The problem is that ping sockets should not use udp_poll() in the first place, and recent changes in UDP stack finally exposed this old bug. Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind") Fixes: 6d0bfe226116 ("net: ipv6: Add IPv6 support to the ping socket.") Signed-off-by: Eric Dumazet Reported-by: Sasha Levin Cc: Solar Designer Cc: Vasiliy Kulikov Cc: Lorenzo Colitti Acked-By: Lorenzo Colitti Tested-By: Lorenzo Colitti Signed-off-by: David S.
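Miller

For context, a ping socket is the SOCK_DGRAM/IPPROTO_ICMP kind added by the first commit referenced above, and the broken path was reachable from ordinary poll() calls on such a socket. A hedged userspace sketch of the pattern that exercised udp_poll(), not the actual reproducer, and it requires net.ipv4.ping_group_range to cover the caller:

#include <poll.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    /* "ping socket": ICMP echo without raw-socket privileges */
    int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
    struct pollfd pfd = { .fd = fd, .events = POLLIN };

    if (fd < 0)
        return 1;
    /* before this fix, polling a ping socket went through
     * udp_poll() and its UDP-specific assumptions */
    poll(&pfd, 1, 0);
    return 0;
}
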
--- include/net/ipv6.h | 1 + net/ipv4/af_inet.c | 2 +- net/ipv6/ping.c | 2 +- net/ipv6/raw.c | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index dbf0abba33b8..3e505bbff8ca 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, */ extern const struct proto_ops inet6_stream_ops; extern const struct proto_ops inet6_dgram_ops; +extern const struct proto_ops inet6_sockraw_ops; struct group_source_req; struct group_filter; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f3dad1661343..58925b6597de 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] = .type = SOCK_DGRAM, .protocol = IPPROTO_ICMP, .prot = &ping_prot, - .ops = &inet_dgram_ops, + .ops = &inet_sockraw_ops, .flags = INET_PROTOSW_REUSE, }, diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 9b522fa90e6d..ac826dd338ff 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_ICMPV6, .prot = &pingv6_prot, - .ops = &inet6_dgram_ops, + .ops = &inet6_sockraw_ops, .flags = INET_PROTOSW_REUSE, }; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 1f992d9e261d..60be012fe708 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1338,7 +1338,7 @@ void raw6_proc_exit(void) #endif /* CONFIG_PROC_FS */ /* Same as inet6_dgram_ops, sans udp_poll. */ -static const struct proto_ops inet6_sockraw_ops = { +const struct proto_ops inet6_sockraw_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, -- cgit v1.2.3 From 48a1df65334b74bd7531f932cca5928932abf769 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 4 Jun 2017 04:16:22 +0200 Subject: skbuff: return -EMSGSIZE in skb_to_sgvec to prevent overflow This is a defense-in-depth measure in response to bugs like 4d6fa57b4dab ("macsec: avoid heap overflow in skb_to_sgvec"). There's not only a potential overflow of sglist items, but also a stack overflow potential, so we fix this by limiting the amount of recursion this function is allowed to do. Not actually providing a bounded base case is a future disaster that we can easily avoid here. As a small matter of housekeeping, we take this opportunity to move the documentation comment over the actual function the documentation is for. While this could be implemented by using an explicit stack of skbuffs, doing so increased the function's complexity considerably, and I don't think such complexity and bloat is actually worth it. So, instead I built this and tested it on x86, x86_64, ARM, ARM64, and MIPS, and measured the stack usage there. I also reverted the recent MIPS changes that give it a separate IRQ stack, so that I could experience some worst-case situations. I found that limiting it to 24 layers deep yielded a good stack usage with room for safety, as well as being much deeper than any driver actually ever creates. Signed-off-by: Jason A. Donenfeld Cc: Steffen Klassert Cc: Herbert Xu Cc: "David S. Miller" Cc: David Howells Cc: Sabrina Dubroca Cc: "Michael S. Tsirkin" Cc: Jason Wang Signed-off-by: David S.
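Miller

Since the patch below makes both functions __must_check, every caller has to adopt the error-checking pattern. A minimal sketch of the new calling convention (the function name and sglist sizing are illustrative, not taken from this patch):

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

static int example_map_skb(struct sk_buff *skb, struct scatterlist *sg,
                           int nelems)
{
    int nsg;

    sg_init_table(sg, nelems);

    /* Returns the number of scatterlist entries used, or -EMSGSIZE
     * if the sglist is too small or the frag list nests deeper than
     * the new 24-level recursion limit */
    nsg = skb_to_sgvec(skb, sg, 0, skb->len);
    if (unlikely(nsg < 0))
        return nsg;

    /* nsg entries are valid and skb_to_sgvec() has already marked
     * the last one via sg_mark_end() */
    return nsg;
}
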
--- include/linux/skbuff.h | 8 +++---- net/core/skbuff.c | 65 ++++++++++++++++++++++++++++++++------------------ 2 files changed, 46 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 45a59c1e0cc7..d460a4cbda1c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -953,10 +953,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); -int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, - int offset, int len); -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, - int len); +int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) consume_skb(a) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 780b7c1563d0..bba33cf4f7cd 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3508,24 +3508,18 @@ void __init skb_init(void) NULL); } -/** - * skb_to_sgvec - Fill a scatter-gather list from a socket buffer - * @skb: Socket buffer containing the buffers to be mapped - * @sg: The scatter-gather list to map into - * @offset: The offset into the buffer's contents to start mapping - * @len: Length of buffer space to be mapped - * - * Fill the specified scatter-gather list with mappings/pointers into a - * region of the buffer space attached to a socket buffer. - */ static int -__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) +__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, + unsigned int recursion_level) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int elt = 0; + if (unlikely(recursion_level >= 24)) + return -EMSGSIZE; + if (copy > 0) { if (copy > len) copy = len; @@ -3544,6 +3538,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + if (unlikely(elt && sg_is_last(&sg[elt - 1]))) + return -EMSGSIZE; + if (copy > len) copy = len; @@ -3558,16 +3554,22 @@ } skb_walk_frags(skb, frag_iter) { - int end; + int end, ret; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { + if (unlikely(elt && sg_is_last(&sg[elt - 1]))) + return -EMSGSIZE; + if (copy > len) copy = len; - elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, - copy); + ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, + copy, recursion_level + 1); + if (unlikely(ret < 0)) + return ret; + elt += ret; if ((len -= copy) == 0) return elt; offset += copy; @@ -3578,6 +3580,31 @@ return elt; } +/** + * skb_to_sgvec - Fill a scatter-gather list from a socket buffer + * @skb: Socket buffer containing the buffers to be mapped + * @sg: The scatter-gather list to map into + * @offset: The offset into the buffer's contents to start mapping + * @len: Length of buffer space to be mapped + * + * Fill the
specified scatter-gather list with mappings/pointers into a + * region of the buffer space attached to a socket buffer. Returns either + * the number of scatterlist items used, or -EMSGSIZE if the contents + * could not fit. + */ +int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) +{ + int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); + + if (nsg <= 0) + return nsg; + + sg_mark_end(&sg[nsg - 1]); + + return nsg; +} +EXPORT_SYMBOL_GPL(skb_to_sgvec); + /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given * sglist without mark the sg which contain last skb data as the end. * So the caller can mannipulate sg list as will when padding new data after @@ -3600,19 +3627,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { - return __skb_to_sgvec(skb, sg, offset, len); + return __skb_to_sgvec(skb, sg, offset, len, 0); } EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) -{ - int nsg = __skb_to_sgvec(skb, sg, offset, len); - sg_mark_end(&sg[nsg - 1]); - - return nsg; -} -EXPORT_SYMBOL_GPL(skb_to_sgvec); /** * skb_cow_data - Check that a socket buffer's data buffers are writable -- cgit v1.2.3 From f604b17d7fdef574792a7e0b39f1b926d6b43d9d Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 4 Jun 2017 13:31:01 +0300 Subject: qed*: L2 interface to use the SB structures directly Part of an effort of a cleaner seperation between qed and the protocol drivers, the L2 interface is to use the SB structure for initialization purposes opaquely. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 32 +++++++++++++++------------- drivers/net/ethernet/qlogic/qed/qed_l2.h | 24 +++++++++++++++------ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 13 +++++++++-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 8 +++---- drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++-- include/linux/qed/qed_eth_if.h | 3 +-- 6 files changed, 52 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 9d5791155fcf..262b2ba13e79 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -182,9 +182,15 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid->opaque_fid = opaque_fid; p_cid->cid = cid; p_cid->vf_qid = vf_qid; - p_cid->rel = *p_params; p_cid->p_owner = p_hwfn; + /* Fill in parameters */ + p_cid->rel.vport_id = p_params->vport_id; + p_cid->rel.queue_id = p_params->queue_id; + p_cid->rel.stats_id = p_params->stats_id; + p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; + p_cid->sb_idx = p_params->sb_idx; + /* Don't try calculating the absolute indices for VFs */ if (IS_VF(p_hwfn->cdev)) { p_cid->abs = p_cid->rel; @@ -215,10 +221,6 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid->abs.stats_id = p_cid->rel.stats_id; } - /* SBs relevant information was already provided as absolute */ - p_cid->abs.sb = p_cid->rel.sb; - p_cid->abs.sb_idx = p_cid->rel.sb_idx; - /* This is tricky - we're actually interested in whehter this is a PF * entry meant for the VF. 
*/ @@ -235,7 +237,7 @@ out: p_cid->rel.queue_id, p_cid->abs.queue_id, p_cid->rel.stats_id, - p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx); + p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx); return p_cid; @@ -767,7 +769,7 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", p_cid->opaque_fid, p_cid->cid, - p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb); + p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -783,8 +785,8 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); - p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); @@ -1001,8 +1003,8 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.tx_queue_start; p_ramrod->vport_id = p_cid->abs.vport_id; - p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); - p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); @@ -2279,9 +2281,9 @@ static int qed_start_rxq(struct qed_dev *cdev, } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", p_params->queue_id, rss_num, p_params->vport_id, - p_params->sb); + p_params->p_sb->igu_sb_id); return 0; } @@ -2329,9 +2331,9 @@ static int qed_start_txq(struct qed_dev *cdev, } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", p_params->queue_id, rss_num, p_params->vport_id, - p_params->sb); + p_params->p_sb->igu_sb_id); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8606bbfa6612..6ad36449dae9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -279,14 +279,24 @@ void qed_reset_vport_stats(struct qed_dev *cdev); #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) +/* Almost identical to the qed_queue_start_common_params, + * but here we maintain the SB index in IGU CAM. + */ +struct qed_queue_cid_params { + u8 vport_id; + u16 queue_id; + u8 stats_id; +}; + struct qed_queue_cid { - /* 'Relative' is a relative term ;-). Usually the indices [not counting - * SBs] would be PF-relative, but there are some cases where that isn't - * the case - specifically for a PF configuring its VF indices it's - * possible some fields [E.g., stats-id] in 'rel' would already be abs. 
- */ - struct qed_queue_start_common_params rel; - struct qed_queue_start_common_params abs; + /* For stats-id, the `rel' is actually absolute as well */ + struct qed_queue_cid_params rel; + struct qed_queue_cid_params abs; + + /* These have no 'relative' meaning */ + u16 sb_igu_id; + u8 sb_idx; + u32 cid; u16 opaque_fid; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 5ae8827534f8..498c83ebc385 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1951,6 +1951,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_NO_RESOURCE; struct qed_vf_q_info *p_queue; struct vfpf_start_rxq_tlv *req; + struct qed_sb_info sb_dummy; bool b_legacy_vf = false; int rc; @@ -1968,7 +1969,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, params.queue_id = p_queue->fw_rx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; - params.sb = req->hw_sb; + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn, @@ -2273,6 +2277,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_txq_tlv *req; struct qed_vf_q_info *p_queue; + struct qed_sb_info sb_dummy; int rc; u16 pq; @@ -2290,7 +2295,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, params.queue_id = p_queue->fw_tx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; - params.sb = req->hw_sb; + + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 29d74074238f..877d41e456e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -588,8 +588,8 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_size = cqe_pbl_size; req->rxq_addr = bd_chain_phys_addr; - req->hw_sb = p_cid->rel.sb; - req->sb_index = p_cid->rel.sb_idx; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; @@ -697,8 +697,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, /* Tx */ req->pbl_addr = pbl_addr; req->pbl_size = pbl_size; - req->hw_sb = p_cid->rel.sb; - req->sb_index = p_cid->rel.sb_idx; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 00c70625f8a4..ad1e24962bdb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1770,7 +1770,7 @@ static int qede_start_txq(struct qede_dev *edev, else params.queue_id = txq->index; - params.sb = fp->sb_info->igu_sb_id; + params.p_sb = fp->sb_info; params.sb_idx = sb_idx; rc = edev->ops->q_tx_start(edev->cdev, rss_id, ¶ms, phys_table, @@ -1849,7 +1849,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) memset(&q_params, 0, sizeof(q_params)); 
q_params.queue_id = rxq->rxq_id; q_params.vport_id = 0; - q_params.sb = fp->sb_info->igu_sb_id; + q_params.p_sb = fp->sb_info; q_params.sb_idx = RX_PI; p_phys_table = diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d66d16a559e1..fd72056f8d49 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -47,8 +47,7 @@ struct qed_queue_start_common_params { /* Relative, but relevant only for PFs */ u8 stats_id; - /* These are always absolute */ - u16 sb; + struct qed_sb_info *p_sb; u8 sb_idx; }; -- cgit v1.2.3 From 08bc8f15e69cbd9f8e3d7bbba4814cec50d51cfe Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 4 Jun 2017 13:31:06 +0300 Subject: qed: Multiple qzone queues for VFs This adds the infrastructure for supporting VFs that want to open multiple transmission queues on the same queue-zone. At this point, there are no VFs that actually request this functionality, but later patches would remedy that. a. VF and PF would communicate the capability during ACQUIRE; Legacy VFs would continue on behaving as they do today b. PF would communicate number of supported CIDs to the VF and would enforce said limitation c. Whenever VF passes a request for a given queue configuration it would also pass an associated index within said queue-zone Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 8 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 30 ++++-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 136 ++++++++++++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 1 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 39 +++++++- drivers/net/ethernet/qlogic/qed/qed_vf.h | 32 ++++++- include/linux/qed/qed_if.h | 4 + 7 files changed, 215 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 25d5b91f7928..e201214764db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2116,8 +2116,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, 1); + if (!p_params->num_vf_cons) + p_params->num_vf_cons = + ETH_PF_PARAMS_VF_CONS_DEFAULT; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 75643c322642..cffa8e7e539b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -198,10 +198,10 @@ static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn, void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { - /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */ - if ((p_cid->vfid == QED_QUEUE_CID_SELF) && - IS_PF(p_hwfn->cdev)) - qed_cxt_release_cid(p_hwfn, p_cid->cid); + bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID); + + if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) + _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); /* For PF's VFs we maintain the index inside queue-zone in IOV */ if (p_cid->vfid == QED_QUEUE_CID_SELF) @@ -319,18 +319,30 @@ qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, struct qed_queue_cid_vf_params *p_vf_params) { struct qed_queue_cid 
*p_cid; + u8 vfid = QED_CXT_PF_CID; bool b_legacy_vf = false; u32 cid = 0; - /* Currently, PF doesn't need to allocate CIDs for any VF */ - if (p_vf_params) - b_legacy_vf = true; + /* In case of legacy VFs, The CID can be derived from the additional + * VF parameters - the VF assumes queue X uses CID X, so we can simply + * use the vf_qid for this purpose as well. + */ + if (p_vf_params) { + vfid = p_vf_params->vfid; + + if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) { + b_legacy_vf = true; + cid = p_vf_params->vf_qid; + } + } + /* Get a unique firmware CID for this queue, in case it's a PF. * VF's don't need a CID as the queue configuration will be done * by PF. */ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) { - if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) { + if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &cid, vfid)) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return NULL; } @@ -339,7 +351,7 @@ qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, p_params, b_is_rx, p_vf_params); if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf) - qed_cxt_release_cid(p_hwfn, cid); + _qed_cxt_release_cid(p_hwfn, cid, vfid); return p_cid; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index e6fb5684b8fd..c620a5fa250b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -47,12 +47,16 @@ static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { - u8 legacy = QED_QCID_LEGACY_VF_CID; + u8 legacy = 0; if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN) legacy |= QED_QCID_LEGACY_VF_RX_PROD; + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + legacy |= QED_QCID_LEGACY_VF_CID; + return legacy; } @@ -1413,6 +1417,10 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, p_req->num_vlan_filters); + p_resp->num_cids = + min_t(u8, p_req->num_cids, + p_hwfn->pf_params.eth_pf_params.num_vf_cons); + /* This isn't really needed/enforced, but some legacy VFs might depend * on the correct filling of this field. */ @@ -1424,10 +1432,11 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_sbs < p_req->num_sbs || p_resp->num_mac_filters < p_req->num_mac_filters || p_resp->num_vlan_filters < p_req->num_vlan_filters || - p_resp->num_mc_filters < p_req->num_mc_filters) { + p_resp->num_mc_filters < p_req->num_mc_filters || + p_resp->num_cids < p_req->num_cids) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", p_vf->abs_vf_id, p_req->num_rxqs, p_resp->num_rxqs, @@ -1439,7 +1448,9 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, - p_req->num_mc_filters, p_resp->num_mc_filters); + p_req->num_mc_filters, + p_resp->num_mc_filters, + p_req->num_cids, p_resp->num_cids); /* Some legacy OSes are incapable of correctly handling this * failure. 
@@ -1555,6 +1566,12 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->num_hwfns > 1) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + /* Share our ability to use multiple queue-ids only with VFs + * that request it. + */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; + qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); @@ -1977,10 +1994,37 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, bool b_is_tx) { - if (b_is_tx) - return QED_IOV_LEGACY_QID_TX; - else - return QED_IOV_LEGACY_QID_RX; + struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Search for the qid if the VF published its going to provide it */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { + if (b_is_tx) + return QED_IOV_LEGACY_QID_TX; + else + return QED_IOV_LEGACY_QID_RX; + } + + p_qid_tlv = (struct vfpf_qid_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + CHANNEL_TLV_QID); + if (!p_qid_tlv) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%2x]: Failed to provide qid\n", + p_vf->relative_vf_id); + + return QED_IOV_QID_INVALID; + } + + if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Provided qid out-of-bounds %02x\n", + p_vf->relative_vf_id, p_qid_tlv->qid); + return QED_IOV_QID_INVALID; + } + + return p_qid_tlv->qid; } static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, @@ -2006,7 +2050,12 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, goto out; qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + p_queue = &vf->vf_queues[req->rx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; vf_legacy = qed_vf_calculate_legacy(vf); @@ -2332,12 +2381,17 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, req = &mbx->req_virt->start_txq; if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, - QED_IOV_VALIDATE_Q_DISABLE) || + QED_IOV_VALIDATE_Q_NA) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + p_queue = &vf->vf_queues[req->tx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; vf_legacy = qed_vf_calculate_legacy(vf); @@ -2388,17 +2442,33 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_queue *p_queue; int rc = 0; - if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, - QED_IOV_VALIDATE_Q_ENABLE)) { + if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] Tried Closing Rx 0x%04x which is inactive\n", - vf->relative_vf_id, rxq_id); + "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", + vf->relative_vf_id, rxq_id, qid_usage_idx); return -EINVAL; } p_queue = &vf->vf_queues[rxq_id]; + /* We've validated the index and the existence of the active RXQ - + * now we need to make sure that it's using the correct qid. 
+ */ + if (!p_queue->cids[qid_usage_idx].p_cid || + p_queue->cids[qid_usage_idx].b_is_tx) { + struct qed_queue_cid *p_cid; + + p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", + vf->relative_vf_id, + rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); + return -EINVAL; + } + + /* Now that we know we have a valid Rx-queue - close it */ rc = qed_eth_rx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid, false, cqe_completion); @@ -2418,11 +2488,13 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_vf_queue *p_queue; int rc = 0; - if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, - QED_IOV_VALIDATE_Q_ENABLE)) + if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA)) return -EINVAL; p_queue = &vf->vf_queues[txq_id]; + if (!p_queue->cids[qid_usage_idx].p_cid || + !p_queue->cids[qid_usage_idx].b_is_tx) + return -EINVAL; rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); if (rc) @@ -2458,6 +2530,8 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, /* Find which qid-index is associated with the queue */ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, qid_usage_idx, req->cqe_completion); @@ -2494,6 +2568,8 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, /* Find which qid-index is associated with the queue */ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); if (!rc) @@ -2524,15 +2600,35 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; - /* Validate inputs */ - for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) + /* There shouldn't exist a VF that uses queue-qids yet uses this + * API with multiple Rx queues. Validate this. + */ + if ((vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] supports QIDs but sends multiple queues\n", + vf->relative_vf_id); + goto out; + } + + /* Validate inputs - for the legacy case this is still true since + * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
+ */ + for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { if (!qed_iov_validate_rxq(p_hwfn, vf, i, - QED_IOV_VALIDATE_Q_ENABLE)) { - DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", - vf->relative_vf_id, req->rx_qid, req->num_rxqs); + QED_IOV_VALIDATE_Q_NA) || + !vf->vf_queues[i].cids[qid_usage_idx].p_cid || + vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, + req->num_rxqs); goto out; } + } /* Prepare the handlers */ for (i = 0; i < req->num_rxqs; i++) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 480cd99c69b5..95f55ae2ee8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -151,6 +151,7 @@ struct qed_iov_vf_mbx { #define QED_IOV_LEGACY_QID_RX (0) #define QED_IOV_LEGACY_QID_TX (1) +#define QED_IOV_QID_INVALID (0xFE) struct qed_vf_queue_cid { bool b_is_tx; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 877d41e456e4..c0d2febad86a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -153,6 +153,22 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) return rc; } +static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Only add QIDs for the queue if it was negotiated with PF */ + if (!(p_iov->acquire_resp.pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); + p_qid_tlv->qid = p_cid->qid_usage_idx; +} + #define VF_ACQUIRE_THRESH 3 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, struct vf_pf_resc_request *p_req, @@ -160,7 +176,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n", + "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. 
Try PF recommended amount\n", p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, @@ -171,7 +187,8 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, - p_req->num_mc_filters, p_resp->num_mc_filters); + p_req->num_mc_filters, + p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids); /* humble our request */ p_req->num_txqs = p_resp->num_txqs; @@ -180,6 +197,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, p_req->num_mac_filters = p_resp->num_mac_filters; p_req->num_vlan_filters = p_resp->num_vlan_filters; p_req->num_mc_filters = p_resp->num_mc_filters; + p_req->num_cids = p_resp->num_cids; } static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) @@ -204,6 +222,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.fw_major = FW_MAJOR_VERSION; @@ -307,6 +326,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) p_iov->b_pre_fp_hsi = true; + /* In case PF doesn't support multi-queue Tx, update the number of + * CIDs to reflect the number of queues [older PFs didn't fill that + * field]. + */ + if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs; + /* Update bulletin board size with response from PF */ p_iov->bulletin.size = resp->bulletin_size; @@ -609,6 +635,9 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); } + + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -657,6 +686,8 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, req->num_rxqs = 1; req->cqe_completion = cqe_completion; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -700,6 +731,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, req->hw_sb = p_cid->sb_igu_id; req->sb_index = p_cid->sb_idx; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -749,6 +782,8 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) req->tx_qid = p_cid->rel.queue_id; req->num_txqs = 1; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index d7b9c90b2f60..9588ae779267 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -46,7 +46,8 @@ struct vf_pf_resc_request { u8 num_mac_filters; u8 num_vlan_filters; u8 num_mc_filters; - u16 padding; + u8 num_cids; + u8 padding; }; struct hw_sb_info { @@ -113,6 +114,11 @@ struct vfpf_acquire_tlv { struct vf_pf_vfdev_info { #define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ #define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ + /* A requirement for 
supporting multi-Tx queues on a single queue-zone, + * VF would pass qids as additional information whenever passing queue + * references. + */ +#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2) u64 capabilities; u8 fw_major; u8 fw_minor; @@ -185,6 +191,9 @@ struct pfvf_acquire_resp_tlv { */ #define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2) + /* PF expects queues to be received with additional qids */ +#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3) + u16 db_size; u8 indices_per_sb; u8 os_type; @@ -221,7 +230,8 @@ struct pfvf_acquire_resp_tlv { u8 num_mac_filters; u8 num_vlan_filters; u8 num_mc_filters; - u8 padding[2]; + u8 num_cids; + u8 padding; } resc; u32 bulletin_size; @@ -234,6 +244,16 @@ struct pfvf_start_queue_resp_tlv { u8 padding[4]; }; +/* Extended queue information - additional index for reference inside qzone. + * If commmunicated between VF/PF, each TLV relating to queues should be + * extended by one such [or have a future base TLV that already contains info]. + */ +struct vfpf_qid_tlv { + struct channel_tlv tl; + u8 qid; + u8 padding[3]; +}; + /* Setup Queue */ struct vfpf_start_rxq_tlv { struct vfpf_first_tlv first_tlv; @@ -597,6 +617,8 @@ enum { CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_UPDATE_TUNN_PARAM, + CHANNEL_TLV_RESERVED, + CHANNEL_TLV_QID, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. @@ -605,6 +627,12 @@ enum { CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, }; +/* Default number of CIDs [total of both Rx and Tx] to be requested + * by default, and maximum possible number. + */ +#define QED_ETH_VF_DEFAULT_NUM_CIDS (32) +#define QED_ETH_VF_MAX_NUM_CIDS (250) + /* This data is held in the qed_hwfn structure for VFs only. */ struct qed_vf_iov { union vfpf_tlvs *vf2pf_request; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 567ea3ea6c0e..74f6b99754aa 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -185,6 +185,10 @@ struct qed_eth_pf_params { */ u16 num_cons; + /* per-VF number of CIDs */ + u8 num_vf_cons; +#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) + /* To enable arfs, previous to HW-init a positive number needs to be * set [as filters require allocated searcher ILT memory]. * This will set the maximal number of configured steering-filters. -- cgit v1.2.3 From cbb8a12c089c7f04b86d08d89bdab71ec9bff1f5 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 4 Jun 2017 13:31:08 +0300 Subject: qed: VF XDP support The final addition on the qed front - - VFs would now require their PFs to provide multiple CIDs - Based on the availability of connections from PF, determine whether XDP is feasible and share it with qede via dev_info. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
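Miller

The feasibility test in the qed_l2.c hunk below is simple arithmetic: the usual reading of the "* 3" is that each L2 queue consumes an Rx connection, a regular Tx connection, and an XDP Tx connection, so, e.g., the default grant of 32 CIDs covers XDP for at most 10 queues. A reduced restatement of that check (a standalone sketch with illustrative names, not driver code):

#include <linux/types.h>

/* Each queue needs an Rx CID, a Tx CID and an XDP-Tx CID, so XDP
 * is only advertised when the PF granted at least 3 CIDs per queue */
static bool vf_xdp_feasible(u16 total_cids, u16 num_queues)
{
    return total_cids >= num_queues * 3;
}
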
--- drivers/net/ethernet/qlogic/qed/qed_l2.c | 19 +++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_vf.c | 13 ++++++++++--- drivers/net/ethernet/qlogic/qed/qed_vf.h | 12 ++++++++++++ include/linux/qed/qed_eth_if.h | 3 +++ 4 files changed, 40 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index cffa8e7e539b..cb8b05dbfc6e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2119,15 +2119,26 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); + + info->xdp_supported = true; } else { - qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues); - if (cdev->num_hwfns > 1) { - u8 queues = 0; + u16 total_cids = 0; + + /* Determine queues & XDP support */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u8 queues, cids; - qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues); + qed_vf_get_num_cids(p_hwfn, &cids); + qed_vf_get_num_rxqs(p_hwfn, &queues); info->num_queues += queues; + total_cids += cids; } + /* Enable VF XDP in case PF guarantees sufficient connections */ + if (total_cids >= info->num_queues * 3) + info->xdp_supported = true; + qed_vf_get_num_vlan_filters(&cdev->hwfns[0], (u8 *)&info->num_vlan_filters); qed_vf_get_num_mac_filters(&cdev->hwfns[0], diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index cb81c357bf62..1926d1ed439f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -291,9 +291,11 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; /* If we've mapped the doorbell bar, try using queue qids */ - if (p_iov->b_doorbell_bar) + if (p_iov->b_doorbell_bar) { req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | VFPF_ACQUIRE_CAP_QUEUE_QIDS; + p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS; + } /* pf 2 vf bulletin board address */ req->bulletin_addr = p_iov->bulletin.phys; @@ -884,8 +886,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", - qid, *pp_doorbell, resp->offset); + "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n", + qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset); exit: qed_vf_pf_req_end(p_hwfn, rc); @@ -1478,6 +1480,11 @@ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) { *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; } +void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) +{ + *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids; +} + void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) { memcpy(port_mac, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index b8ce4bef8c94..b65bbc54a097 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -731,6 +731,14 @@ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); */ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); +/** + * @brief Get number of available connections [both Rx and Tx] for VF + * + * @param p_hwfn + * @param num_cids - allocated number of connections + */ +void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); + /** * @brief Get port mac address for VF * @@ -1010,6 +1018,10 @@ static inline void
qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) { } +static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) +{ +} + static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) { } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index fd72056f8d49..0eef0a2b1901 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -73,6 +73,9 @@ struct qed_dev_eth_info { /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; + + /* Might depend on available resources [in case of VF] */ + bool xdp_supported; }; struct qed_update_vport_rss_params { -- cgit v1.2.3 From 6dc06c08bef1c746ff8da33dab677cfbacdcad32 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Sun, 4 Jun 2017 14:30:07 +0300 Subject: net/mlx4: Fix the check in attaching steering rules Our previous patch (cited below) introduced a regression for RAW Eth QPs. Fix it by checking if the QP number provided by user-space exists, hence allowing steering rules to be added for valid QPs only. Fixes: 89c557687a32 ("net/mlx4_en: Avoid adding steering rules with invalid ring") Reported-by: Or Gerlitz Signed-off-by: Talat Batheesh Signed-off-by: Tariq Toukan Acked-by: Or Gerlitz Reviewed-by: Leon Romanovsky Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 5 ----- drivers/net/ethernet/mellanox/mlx4/mcg.c | 15 +++++++++++---- drivers/net/ethernet/mellanox/mlx4/qp.c | 13 +++++++++++++ include/linux/mlx4/qp.h | 1 + 4 files changed, 25 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae5fdc2df654..ffbcb27c05e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev, qpn = priv->drop_qp.qpn; else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); - if (qpn < priv->rss_map.base_qpn || - qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) { - en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn); - return -EINVAL; - } } else { if (cmd->fs.ring_cookie >= priv->rx_ring_num) { en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1a670b681555..0710b3677464 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -35,6 +35,7 @@ #include #include +#include #include #include "mlx4.h" @@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + if (!mlx4_qp_lookup(dev, rule->qpn)) { + mlx4_err_rule(dev, "QP doesn't exist\n", rule); + ret = -EINVAL; + goto out; + } + trans_rule_ctrl_to_hw(rule, mailbox->buf); size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); list_for_each_entry(cur, &rule->list, list) { ret = parse_trans_rule(dev, cur, mailbox->buf + size); - if (ret < 0) { - mlx4_free_cmd_mailbox(dev, mailbox); - return ret; - } + if (ret < 0) + goto out; + size += ret; } @@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, } } +out: mlx4_free_cmd_mailbox(dev, mailbox); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2d6abd4662b1..ad92d2311478 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) __mlx4_qp_free_icm(dev, qpn); } +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + + spin_unlock(&qp_table->lock); + return qp; +} + int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index b4ee8f62ce8d..8e2828d48d7f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -470,6 +470,7 @@ struct mlx4_update_qp_params { u16 rate_val; }; +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params); -- cgit v1.2.3 From 4722974d90e06d0164ca1b73a6b34cec6bdb64ad Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 5 Jun 2017 14:30:49 +0100 Subject: rxrpc: Implement service upgrade Implement AuriStor's service upgrade facility. There are three problems that this is meant to deal with: (1) Various of the standard AFS RPC calls have IPv4 addresses in their requests and/or replies - but there's no room for including IPv6 addresses. (2) Definition of IPv6-specific RPC operations in the standard operation sets has not yet been achieved. (3) One could envision the creation of a new service on the same port as the original service. The new service could implement improved operations - and the client could try this first, falling back to the original service if it's not there. Unfortunately, certain servers ignore packets addressed to a service they don't implement and don't respond in any way - not even with an ABORT. This means that the client must then wait for the call timeout to occur. What service upgrade does is to see if the connection is marked as being 'upgradeable' and if so, change the service ID in the server and thus the request and reply formats. Note that the upgrade isn't mandatory - a server that supports only the original call set will ignore the upgrade request. In the protocol, the procedure is then as follows: (1) To request an upgrade, the first DATA packet in a new connection must have the userStatus set to 1 (this is normally 0). The userStatus value is normally ignored by the server. (2) If the server doesn't support upgrading, the reply packets will contain the same service ID as for the first request packet. (3) If the server does support upgrading, all future reply packets on that connection will contain the new service ID and the new service ID will be applied to *all* further calls on that connection as well. (4) The RPC op used to probe the upgrade must take the same request data as the shadow call in the upgrade set (but may return a different reply). GetCapability RPC ops were added to all standard sets for just this purpose. Ops where the request formats differ cannot be used for probing. (5) The client must wait for completion of the probe before sending any further RPC ops to the same destination. It should then use the service ID that recvmsg() reported back in all future calls. (6) The shadow service must have call definitions for all the operation IDs defined by the original service. To support service upgrading, a server should: (1) Call bind() twice on its AF_RXRPC socket before calling listen().
Each bind() should supply a different service ID, but the transport addresses must be the same. This allows the server to receive requests with either service ID. (2) Enable automatic upgrading by calling setsockopt(), specifying RXRPC_UPGRADEABLE_SERVICE and passing in a two-member array of unsigned shorts as the argument: unsigned short optval[2]; This specifies a pair of service IDs. They must be different and must match the service IDs bound to the socket. Member 0 is the service ID to upgrade from and member 1 is the service ID to upgrade to. Signed-off-by: David Howells --- Documentation/networking/rxrpc.txt | 34 ++++++++++++++++++++++++++-------- include/linux/rxrpc.h | 1 + include/rxrpc/packet.h | 2 ++ net/rxrpc/af_rxrpc.c | 23 +++++++++++++++++++++++ net/rxrpc/ar-internal.h | 10 ++++++++-- net/rxrpc/call_accept.c | 2 +- net/rxrpc/conn_service.c | 11 ++++++++++- 7 files changed, 71 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index b7115ec55e04..2a1662760450 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -433,6 +433,13 @@ AF_RXRPC sockets support a few socket options at the SOL_RXRPC level: Encrypted checksum plus entire packet padded and encrypted, including actual packet length. + (*) RXRPC_UPGRADEABLE_SERVICE + + This is used to indicate that a service socket with two bindings may + upgrade one bound service to the other if requested by the client. optval + must point to an array of two unsigned short ints. The first is the + service ID to upgrade from and the second the service ID to upgrade to. + ======== SECURITY @@ -588,7 +595,7 @@ A server would be set up to accept operations in the following manner: The keyring can be manipulated after it has been given to the socket. This permits the server to add more keys, replace keys, etc. whilst it is live. - (2) A local address must then be bound: + (3) A local address must then be bound: struct sockaddr_rxrpc srx = { .srx_family = AF_RXRPC, @@ -604,11 +611,22 @@ A server would be set up to accept operations in the following manner: parameters are the same. The limit is currently two. To do this, bind() should be called twice. - (3) The server is then set to listen out for incoming calls: + (4) If service upgrading is required, first two service IDs must have been + bound and then the following option must be set: + + unsigned short service_ids[2] = { from_ID, to_ID }; + setsockopt(server, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE, + service_ids, sizeof(service_ids)); + + This will automatically upgrade connections on service from_ID to service + to_ID if they request it. This will be reflected in msg_name obtained + through recvmsg() when the request data is delivered to userspace. + + (5) The server is then set to listen out for incoming calls: listen(server, 100); - (4) The kernel notifies the server of pending incoming connections by sending + (6) The kernel notifies the server of pending incoming connections by sending it a message for each. This is received with recvmsg() on the server socket. It has no data, and has a single dataless control message attached: @@ -620,13 +638,13 @@ A server would be set up to accept operations in the following manner: the time it is accepted - in which case the first call still on the queue will be accepted. 
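Putting steps (3)-(5) together, a server offering an upgrade from service 100 to service 101 might be set up roughly as follows (an illustrative sketch only: the service IDs and port are made up, some transport fields are abbreviated and error handling is omitted):

    struct sockaddr_rxrpc srx = {
            .srx_family               = AF_RXRPC,
            .srx_service              = 100,   /* upgrade-from ID */
            .transport_type           = SOCK_DGRAM,
            .transport.sin.sin_family = AF_INET,
            .transport.sin.sin_port   = htons(7000),
    };
    unsigned short ids[2] = { 100, 101 };
    int server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);

    bind(server, (struct sockaddr *)&srx, sizeof(srx));
    srx.srx_service = 101;                     /* upgrade-to ID */
    bind(server, (struct sockaddr *)&srx, sizeof(srx));
    setsockopt(server, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
               ids, sizeof(ids));              /* must precede listen() */
    listen(server, 100);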
- (5) The server then accepts the new call by issuing a sendmsg() with two + (7) The server then accepts the new call by issuing a sendmsg() with two pieces of control data and no actual data: RXRPC_ACCEPT - indicate connection acceptance RXRPC_USER_CALL_ID - specify user ID for this call - (6) The first request data packet will then be posted to the server socket for + (8) The first request data packet will then be posted to the server socket for recvmsg() to pick up. At that point, the RxRPC address for the call can be read from the address fields in the msghdr struct. @@ -638,7 +656,7 @@ A server would be set up to accept operations in the following manner: RXRPC_USER_CALL_ID - specifies the user ID for this call - (8) The reply data should then be posted to the server socket using a series + (9) The reply data should then be posted to the server socket using a series of sendmsg() calls, each with the following control messages attached: RXRPC_USER_CALL_ID - specifies the user ID for this call @@ -646,7 +664,7 @@ A server would be set up to accept operations in the following manner: MSG_MORE should be set in msghdr::msg_flags on all but the last message for a particular call. - (9) The final ACK from the client will be posted for retrieval by recvmsg() +(10) The final ACK from the client will be posted for retrieval by recvmsg() when it is received. It will take the form of a dataless message with two control messages attached: @@ -656,7 +674,7 @@ A server would be set up to accept operations in the following manner: MSG_EOR will be flagged to indicate that this is the final message for this call. -(10) Up to the point the final packet of reply data is sent, the call can be +(11) Up to the point the final packet of reply data is sent, the call can be aborted by calling sendmsg() with a dataless message with the following control messages attached: diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index c68307bc306f..634116561a6a 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -37,6 +37,7 @@ struct sockaddr_rxrpc { #define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ #define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ +#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */ /* * RxRPC control messages diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 703a64b4681a..a2dcfb850b9f 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -58,6 +58,8 @@ struct rxrpc_wire_header { #define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */ uint8_t userStatus; /* app-layer defined status */ +#define RXRPC_USERSTATUS_SERVICE_UPGRADE 0x01 /* AuriStor service upgrade request */ + uint8_t securityIndex; /* security protocol ID */ union { __be16 _rsvd; /* reserved */ diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 3b982bca7d22..0c4dc4a7832c 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -490,6 +490,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); unsigned int min_sec_level; + u16 service_upgrade[2]; int ret; _enter(",%d,%d,,%d", level, optname, optlen); @@ -546,6 +547,28 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, rx->min_sec_level = min_sec_level; goto success; + case RXRPC_UPGRADEABLE_SERVICE: + ret = -EINVAL; + if (optlen != sizeof(service_upgrade) || + 
rx->service_upgrade.from != 0) + goto error; + ret = -EISCONN; + if (rx->sk.sk_state != RXRPC_SERVER_BOUND2) + goto error; + ret = -EFAULT; + if (copy_from_user(service_upgrade, optval, + sizeof(service_upgrade)) != 0) + goto error; + ret = -EINVAL; + if ((service_upgrade[0] != rx->srx.srx_service || + service_upgrade[1] != rx->second_service) && + (service_upgrade[0] != rx->second_service || + service_upgrade[1] != rx->srx.srx_service)) + goto error; + rx->service_upgrade.from = service_upgrade[0]; + rx->service_upgrade.to = service_upgrade[1]; + goto success; + default: break; } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 781fbc253b5a..c1ebd886a53f 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -144,8 +144,13 @@ struct rxrpc_sock { #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT bool exclusive; /* Exclusive connection for a client socket */ u16 second_service; /* Additional service bound to the endpoint */ + struct { + /* Service upgrade information */ + u16 from; /* Service ID to upgrade (if not 0) */ + u16 to; /* service ID to upgrade to */ + } service_upgrade; sa_family_t family; /* Protocol family created with */ - struct sockaddr_rxrpc srx; /* local address */ + struct sockaddr_rxrpc srx; /* Primary Service/local addresses */ struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */ }; @@ -861,7 +866,8 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn) struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *, struct sk_buff *); struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t); -void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *); +void rxrpc_new_incoming_connection(struct rxrpc_sock *, + struct rxrpc_connection *, struct sk_buff *); void rxrpc_unpublish_service_conn(struct rxrpc_connection *); /* diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 544df53ccf79..0d4d84e8c074 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -296,7 +296,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, conn->params.local = local; conn->params.peer = peer; rxrpc_see_connection(conn); - rxrpc_new_incoming_connection(conn, skb); + rxrpc_new_incoming_connection(rx, conn, skb); } else { rxrpc_get_connection(conn); } diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c index c7f8682a55b2..e60fcd2a4a02 100644 --- a/net/rxrpc/conn_service.c +++ b/net/rxrpc/conn_service.c @@ -150,7 +150,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn * Set up an incoming connection. This is called in BH context with the RCU * read lock held. */ -void rxrpc_new_incoming_connection(struct rxrpc_connection *conn, +void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, + struct rxrpc_connection *conn, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); @@ -168,6 +169,14 @@ void rxrpc_new_incoming_connection(struct rxrpc_connection *conn, else conn->state = RXRPC_CONN_SERVICE; + /* See if we should upgrade the service. This can only happen on the + * first packet on a new connection. Once done, it applies to all + * subsequent calls on that connection. + */ + if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE && + conn->service_id == rx->service_upgrade.from) + conn->service_id = rx->service_upgrade.to; + /* Make the connection a target for incoming packets. 
*/ rxrpc_publish_service_conn(conn->params.peer, conn); -- cgit v1.2.3 From 4e255721d1575a766ada06dc7eb03acdcd34eaaf Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 5 Jun 2017 14:30:49 +0100 Subject: rxrpc: Add service upgrade support for client connections Make it possible for a client to use AuriStor's service upgrade facility. The client does this by adding an RXRPC_UPGRADE_SERVICE control message to the first sendmsg() of a call. This takes no parameters. When recvmsg() starts returning data from the call, the service ID field in the returned msg_name will reflect the result of the upgrade attempt. If the upgrade was ignored, srx_service will match what was set in the sendmsg(); if the upgrade happened the srx_service will be altered to indicate the service the server upgraded to. Note that: (1) The choice of upgrade service is up to the server (2) Further client calls to the same server that would share a connection are blocked if an upgrade probe is in progress. (3) This should only be used to probe the service. Clients should then use the returned service ID in all subsequent communications with that server (and not set the upgrade). Note that the kernel will not retain this information should the connection expire from its cache. (4) If a server that supports upgrading is replaced by one that doesn't, whilst a connection is live, and if the replacement is running, say, OpenAFS 1.6.4 or older or an older IBM AFS, then the replacement server will not respond to packets sent to the upgraded connection. At this point, calls will time out and the server must be reprobed. Signed-off-by: David Howells --- Documentation/networking/rxrpc.txt | 30 ++++++++++++++++++++++++++ include/linux/rxrpc.h | 1 + include/trace/events/rxrpc.h | 1 + net/rxrpc/ar-internal.h | 3 +++ net/rxrpc/conn_client.c | 43 +++++++++++++++++++++++++++++++------- net/rxrpc/input.c | 17 +++++++++++++++ net/rxrpc/output.c | 4 ++++ net/rxrpc/sendmsg.c | 19 +++++++++++++---- 8 files changed, 106 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index 2a1662760450..18078e630a63 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -325,6 +325,8 @@ calls, to invoke certain actions and to report certain conditions. These are: RXRPC_LOCAL_ERROR -rt error num Local error encountered RXRPC_NEW_CALL -r- n/a New call received RXRPC_ACCEPT s-- n/a Accept new call + RXRPC_EXCLUSIVE_CALL s-- n/a Make an exclusive client call + RXRPC_UPGRADE_SERVICE s-- n/a Client call can be upgraded (SRT = usable in Sendmsg / delivered by Recvmsg / Terminal message) @@ -387,6 +389,23 @@ calls, to invoke certain actions and to report certain conditions. These are: return error ENODATA. If the user ID is already in use by another call, then error EBADSLT will be returned. + (*) RXRPC_EXCLUSIVE_CALL + + This is used to indicate that a client call should be made on a one-off + connection. The connection is discarded once the call has terminated. + + (*) RXRPC_UPGRADE_SERVICE + + This is used to make a client call to probe if the specified service ID + may be upgraded by the server. The caller must check msg_name returned to + recvmsg() for the service ID actually in use. The operation probed must + be one that takes the same arguments in both services. 
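At the sendmsg() level, requesting the upgrade simply means attaching one extra, dataless control message alongside the usual RXRPC_USER_CALL_ID on the first sendmsg() of the call - roughly as follows (a sketch: the call ID value is a placeholder, and msg_name/iovec setup is as for any other client call):

    unsigned long call_id = 1;
    char ctrl[CMSG_SPACE(sizeof(call_id)) + CMSG_SPACE(0)];
    struct msghdr msg = {
            .msg_control    = ctrl,
            .msg_controllen = sizeof(ctrl),
    };
    struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

    cm->cmsg_level = SOL_RXRPC;
    cm->cmsg_type  = RXRPC_USER_CALL_ID;
    cm->cmsg_len   = CMSG_LEN(sizeof(call_id));
    memcpy(CMSG_DATA(cm), &call_id, sizeof(call_id));
    cm = CMSG_NXTHDR(&msg, cm);
    cm->cmsg_level = SOL_RXRPC;
    cm->cmsg_type  = RXRPC_UPGRADE_SERVICE;    /* takes no parameters */
    cm->cmsg_len   = CMSG_LEN(0);
    /* ...set msg_name to the target, attach the request data, sendmsg() */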
+ + Once this has been used to establish the upgrade capability (or lack + thereof) of the server, the service ID returned should be used for all + future communication to that server and RXRPC_UPGRADE_SERVICE should no + longer be set. + ============== SOCKET OPTIONS @@ -566,6 +585,17 @@ A client would issue an operation by: buffer instead, and MSG_EOR will be flagged to indicate the end of that call. +A client may ask for a service ID it knows and ask that this be upgraded to a +better service if one is available by supplying RXRPC_UPGRADE_SERVICE on the +first sendmsg() of a call. The client should then check srx_service in the +msg_name filled in by recvmsg() when collecting the result. srx_service will +hold the same value as given to sendmsg() if the upgrade request was ignored by +the service - otherwise it will be altered to indicate the service ID the +server upgraded to. Note that the upgraded service ID is chosen by the server. +The caller has to wait until it sees the service ID in the reply before sending +any more calls (further calls to the same destination will be blocked until the +probe is concluded). + ==================== EXAMPLE SERVER USAGE diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index 634116561a6a..707910c6c6c5 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -54,6 +54,7 @@ struct sockaddr_rxrpc { #define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ #define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ #define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ +#define RXRPC_UPGRADE_SERVICE 11 /* s-: Request service upgrade for client call */ /* * RxRPC security levels diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 29a3d53a4015..ebe96796027a 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -233,6 +233,7 @@ enum rxrpc_congest_change { EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \ EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \ EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \ + EM(RXRPC_CONN_CLIENT_UPGRADE, "Upgd") \ EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \ E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \ diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index c1ebd886a53f..e9b536cb0acf 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -320,6 +320,7 @@ struct rxrpc_conn_parameters { struct rxrpc_peer *peer; /* Remote endpoint */ struct key *key; /* Security details */ bool exclusive; /* T if conn is exclusive */ + bool upgrade; /* T if service ID can be upgraded */ u16 service_id; /* Service ID for this connection */ u32 security_level; /* Security level selected */ }; @@ -334,6 +335,7 @@ enum rxrpc_conn_flag { RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */ RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */ RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */ + RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */ }; /* @@ -350,6 +352,7 @@ enum rxrpc_conn_cache_state { RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */ RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */ RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */ + RXRPC_CONN_CLIENT_UPGRADE, /* Conn is on active list, probing for upgrade */ RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */ RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */ RXRPC_CONN__NR_CACHE_STATES diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 
3f358bf424ad..dd8bb919c15a 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -36,12 +36,15 @@ * * rxrpc_nr_active_client_conns is held incremented also. * - * (4) CULLED - The connection got summarily culled to try and free up + * (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is + * being used to probe for service upgrade. + * + * (5) CULLED - The connection got summarily culled to try and free up * capacity. Calls currently in progress on the connection are allowed to * continue, but new calls will have to wait. There can be no waiters in * this state - the conn would have to go to the WAITING state instead. * - * (5) IDLE - The connection has no calls in progress upon it and must have + * (6) IDLE - The connection has no calls in progress upon it and must have * been exposed to the world (ie. the EXPOSED flag must be set). When it * expires, the EXPOSED flag is cleared and the connection transitions to * the INACTIVE state. @@ -184,6 +187,8 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) atomic_set(&conn->usage, 1); if (cp->exclusive) __set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); + if (cp->upgrade) + __set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); conn->params = *cp; conn->out_clientflag = RXRPC_CLIENT_INITIATED; @@ -300,7 +305,8 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call, #define cmp(X) ((long)conn->params.X - (long)cp->X) diff = (cmp(peer) ?: cmp(key) ?: - cmp(security_level)); + cmp(security_level) ?: + cmp(upgrade)); #undef cmp if (diff < 0) { p = p->rb_left; @@ -365,7 +371,8 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call, #define cmp(X) ((long)conn->params.X - (long)candidate->params.X) diff = (cmp(peer) ?: cmp(key) ?: - cmp(security_level)); + cmp(security_level) ?: + cmp(upgrade)); #undef cmp if (diff < 0) { pp = &(*pp)->rb_left; @@ -436,8 +443,13 @@ error: static void rxrpc_activate_conn(struct rxrpc_net *rxnet, struct rxrpc_connection *conn) { - trace_rxrpc_client(conn, -1, rxrpc_client_to_active); - conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; + if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) { + trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade); + conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE; + } else { + trace_rxrpc_client(conn, -1, rxrpc_client_to_active); + conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; + } rxnet->nr_active_client_conns++; list_move_tail(&conn->cache_link, &rxnet->active_client_conns); } @@ -461,7 +473,8 @@ static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet, _enter("%d,%d", conn->debug_id, conn->cache_state); - if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE) + if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE || + conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE) goto out; spin_lock(&rxnet->client_conn_cache_lock); @@ -474,6 +487,7 @@ static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet, switch (conn->cache_state) { case RXRPC_CONN_CLIENT_ACTIVE: + case RXRPC_CONN_CLIENT_UPGRADE: case RXRPC_CONN_CLIENT_WAITING: break; @@ -577,6 +591,9 @@ static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn) case RXRPC_CONN_CLIENT_ACTIVE: mask = RXRPC_ACTIVE_CHANS_MASK; break; + case RXRPC_CONN_CLIENT_UPGRADE: + mask = 0x01; + break; default: return; } @@ -787,6 +804,15 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) spin_lock(&rxnet->client_conn_cache_lock); switch (conn->cache_state) { + case RXRPC_CONN_CLIENT_UPGRADE: + /* Deal with termination of a service upgrade probe. 
*/ + if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) { + clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); + trace_rxrpc_client(conn, channel, rxrpc_client_to_active); + conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; + rxrpc_activate_channels_locked(conn); + } + /* fall through */ case RXRPC_CONN_CLIENT_ACTIVE: if (list_empty(&conn->waiting_calls)) { rxrpc_deactivate_one_channel(conn, channel); @@ -941,7 +967,8 @@ static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet) ASSERT(!list_empty(&rxnet->active_client_conns)); conn = list_entry(rxnet->active_client_conns.next, struct rxrpc_connection, cache_link); - ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE); + ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE, + conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE); if (list_empty(&conn->waiting_calls)) { trace_rxrpc_client(conn, -1, rxrpc_client_to_culled); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 45dba732a3b4..e56e23ed2229 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -1142,6 +1142,13 @@ void rxrpc_data_ready(struct sock *udp_sk) if (sp->hdr.securityIndex != conn->security_ix) goto wrong_security; + if (sp->hdr.serviceId != conn->service_id) { + if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) || + conn->service_id != conn->params.service_id) + goto reupgrade; + conn->service_id = sp->hdr.serviceId; + } + if (sp->hdr.callNumber == 0) { /* Connection-level packet */ _debug("CONN %p {%d}", conn, conn->debug_id); @@ -1194,6 +1201,9 @@ void rxrpc_data_ready(struct sock *udp_sk) rxrpc_input_implicit_end_call(conn, call); call = NULL; } + + if (call && sp->hdr.serviceId != call->service_id) + call->service_id = sp->hdr.serviceId; } else { skew = 0; call = NULL; @@ -1237,11 +1247,18 @@ wrong_security: skb->priority = RXKADINCONSISTENCY; goto post_abort; +reupgrade: + rcu_read_unlock(); + trace_rxrpc_abort("UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + RX_PROTOCOL_ERROR, EBADMSG); + goto protocol_error; + bad_message_unlock: rcu_read_unlock(); bad_message: trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RX_PROTOCOL_ERROR, EBADMSG); +protocol_error: skb->priority = RX_PROTOCOL_ERROR; post_abort: skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 5dab1ff3a6c2..5bd2d0fa4a03 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -292,6 +292,10 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, whdr._rsvd = htons(sp->hdr._rsvd); whdr.serviceId = htons(call->service_id); + if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) && + sp->hdr.seq == 1) + whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE; + iov[0].iov_base = &whdr; iov[0].iov_len = sizeof(whdr); iov[1].iov_base = skb->head; diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 96ffa5d5733b..5a4801e7f560 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -366,7 +366,8 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, unsigned long *user_call_ID, enum rxrpc_command *command, u32 *abort_code, - bool *_exclusive) + bool *_exclusive, + bool *_upgrade) { struct cmsghdr *cmsg; bool got_user_ID = false; @@ -429,6 +430,13 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, if (len != 0) return -EINVAL; break; + + case RXRPC_UPGRADE_SERVICE: + *_upgrade = true; + if (len != 0) + return -EINVAL; + break; + default: return -EINVAL; } @@ -447,7 +455,8 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, */ static struct rxrpc_call * 
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, - unsigned long user_call_ID, bool exclusive) + unsigned long user_call_ID, bool exclusive, + bool upgrade) __releases(&rx->sk.sk_lock.slock) { struct rxrpc_conn_parameters cp; @@ -472,6 +481,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, cp.key = rx->key; cp.security_level = rx->min_sec_level; cp.exclusive = rx->exclusive | exclusive; + cp.upgrade = upgrade; cp.service_id = srx->srx_service; call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL); /* The socket is now unlocked */ @@ -493,13 +503,14 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) struct rxrpc_call *call; unsigned long user_call_ID = 0; bool exclusive = false; + bool upgrade = true; u32 abort_code = 0; int ret; _enter(""); ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code, - &exclusive); + &exclusive, &upgrade); if (ret < 0) goto error_release_sock; @@ -521,7 +532,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (cmd != RXRPC_CMD_SEND_DATA) goto error_release_sock; call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID, - exclusive); + exclusive, upgrade); /* The socket is now unlocked... */ if (IS_ERR(call)) return PTR_ERR(call); -- cgit v1.2.3 From 1e0ce2a1ee0d5fb334e82e80600f1e8e77df525c Mon Sep 17 00:00:00 2001 From: Anmol Sarma Date: Sat, 3 Jun 2017 17:40:54 +0530 Subject: net: Update TCP congestion control documentation Update tcp.txt to fix mandatory congestion control ops and default CCA selection. Also, fix comment in tcp.h for undo_cwnd. Signed-off-by: Anmol Sarma Signed-off-by: David S. Miller --- Documentation/networking/tcp.txt | 31 +++++++++++++------------------ include/net/tcp.h | 2 +- 2 files changed, 14 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt index bdc4c0db51e1..9c7139d57e57 100644 --- a/Documentation/networking/tcp.txt +++ b/Documentation/networking/tcp.txt @@ -1,7 +1,7 @@ TCP protocol ============ -Last updated: 9 February 2008 +Last updated: 3 June 2017 Contents ======== @@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms. A congestion control mechanism can be registered through functions in tcp_cong.c. The functions used by the congestion control mechanism are registered via passing a tcp_congestion_ops struct to -tcp_register_congestion_control. As a minimum name, ssthresh, -cong_avoid must be valid. +tcp_register_congestion_control. As a minimum, the congestion control +mechanism must provide a valid name and must implement either ssthresh, +cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook. Private data for a congestion control mechanism is stored in tp->ca_priv. tcp_ca(tp) returns a pointer to this space. This is preallocated space - it is important to check the size of your private data will fit this space, or -alternatively space could be allocated elsewhere and a pointer to it could +alternatively, space could be allocated elsewhere and a pointer to it could be stored here. There are three kinds of congestion control algorithms currently: The simplest ones are derived from TCP reno (highspeed, scalable) and just -provide an alternative the congestion window calculation. More complex +provide an alternative congestion window calculation. More complex ones like BIC try to look at other events to provide better heuristics. 
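As a minimal illustration of that contract, a toy module satisfying the mandatory name/ssthresh/cong_avoid/undo_cwnd set by borrowing the stock Reno helpers would look roughly like this (a sketch; the name is a placeholder and the module exit boilerplate is omitted):

    static struct tcp_congestion_ops tcp_example __read_mostly = {
            .name       = "example",              /* must be valid/unique */
            .owner      = THIS_MODULE,
            .ssthresh   = tcp_reno_ssthresh,      /* required */
            .cong_avoid = tcp_reno_cong_avoid,    /* required */
            .undo_cwnd  = tcp_reno_undo_cwnd,     /* required */
    };

    static int __init tcp_example_init(void)
    {
            return tcp_register_congestion_control(&tcp_example);
    }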
There are also round trip time based algorithms like Vegas and Westwood+. @@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm needs to maintain fairness and performance. Please review current research and RFC's before developing new modules. -The method that is used to determine which congestion control mechanism is -determined by the setting of the sysctl net.ipv4.tcp_congestion_control. -The default congestion control will be the last one registered (LIFO); -so if you built everything as modules, the default will be reno. If you -build with the defaults from Kconfig, then CUBIC will be builtin (not a -module) and it will end up the default. +The default congestion control mechanism is chosen based on the +DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default +value then you can set it using sysctl net.ipv4.tcp_congestion_control. The +module will be autoloaded if needed and you will get the expected protocol. If +you ask for an unknown congestion method, then the sysctl attempt will fail. -If you really want a particular default value then you will need -to set it with the sysctl. If you use a sysctl, the module will be autoloaded -if needed and you will get the expected protocol. If you ask for an -unknown congestion method, then the sysctl attempt will fail. - -If you remove a tcp congestion control module, then you will get the next +If you remove a TCP congestion control module, then you will get the next available one. Since reno cannot be built as a module, and cannot be -deleted, it will always be available. +removed, it will always be available. How the new TCP output machine [nyi] works. =========================================== diff --git a/include/net/tcp.h b/include/net/tcp.h index 38a7427ae902..be6223c586fa 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -924,7 +924,7 @@ struct tcp_congestion_ops { void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); /* call when ack arrives (optional) */ void (*in_ack_event)(struct sock *sk, u32 flags); - /* new value of cwnd after loss (optional) */ + /* new value of cwnd after loss (required) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); -- cgit v1.2.3 From 4e2ec43654f240a3731612764850140f669d1b4b Mon Sep 17 00:00:00 2001 From: "Rosen, Rami" Date: Sun, 4 Jun 2017 15:20:01 +0300 Subject: genetlink: remove ops_list from genetlink header. commit d91824c08fbc ("genetlink: register family ops as array") removed the ops_list member from both genl_family and genl_ops; while the documentation of genl_family was updated accordingly by this patch, ops_list remained in the documentation of the genl_ops object. This patch fixes it by removing ops_list from genl_ops documentation. Signed-off-by: Rami Rosen Signed-off-by: David S. 
Miller --- include/net/genetlink.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 68b88192b00c..c59a098221db 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -128,7 +128,6 @@ static inline int genl_err_attr(struct genl_info *info, int err, * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback for dumps - * @ops_list: operations list */ struct genl_ops { const struct nla_policy *policy; -- cgit v1.2.3 From 82c01a84d5a9bd3b9347bb03eed2f05bbccef933 Mon Sep 17 00:00:00 2001 From: "yuval.shaia@oracle.com" Date: Sun, 4 Jun 2017 20:22:00 +0300 Subject: net/{mii, smsc}: Make mii_ethtool_get_link_ksettings and smc_netdev_get_ecmd return void Make the return value void, since the functions never return a meaningful value. Signed-off-by: Yuval Shaia Signed-off-by: David S. Miller --- drivers/net/cris/eth_v10.c | 5 ++--- drivers/net/ethernet/3com/3c59x.c | 4 +++- drivers/net/ethernet/amd/pcnet32.c | 5 +---- drivers/net/ethernet/cirrus/ep93xx_eth.c | 5 ++++- drivers/net/ethernet/dec/tulip/winbond-840.c | 5 ++--- drivers/net/ethernet/faraday/ftmac100.c | 5 ++++- drivers/net/ethernet/fealnx.c | 5 ++--- drivers/net/ethernet/intel/e100.c | 5 ++++- drivers/net/ethernet/jme.c | 5 ++--- drivers/net/ethernet/korina.c | 5 ++--- drivers/net/ethernet/micrel/ks8851.c | 5 ++++- drivers/net/ethernet/micrel/ks8851_mll.c | 5 ++++- drivers/net/ethernet/nuvoton/w90p910_ether.c | 5 ++++- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 6 +++--- drivers/net/ethernet/realtek/8139cp.c | 5 ++--- drivers/net/ethernet/realtek/r8169.c | 4 +++- drivers/net/ethernet/sgi/ioc3-eth.c | 5 ++--- drivers/net/ethernet/sis/sis190.c | 4 +++- drivers/net/ethernet/smsc/epic100.c | 5 ++--- drivers/net/ethernet/smsc/smc911x.c | 7 +++---- drivers/net/ethernet/smsc/smc91c92_cs.c | 13 +++++-------- drivers/net/ethernet/smsc/smc91x.c | 7 ++----- drivers/net/ethernet/tundra/tsi108_eth.c | 5 ++--- drivers/net/ethernet/via/via-rhine.c | 5 ++--- drivers/net/mii.c | 8 ++------ drivers/net/usb/ax88179_178a.c | 5 ++++- drivers/net/usb/r8152.c | 2 +- drivers/net/usb/usbnet.c | 4 +++- include/linux/mii.h | 2 +- 29 files changed, 78 insertions(+), 73 deletions(-) (limited to 'include') diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index da020418a652..017f48cdcab9 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1417,10 +1417,9 @@ static int e100_get_link_ksettings(struct net_device *dev, { struct net_local *np = netdev_priv(dev); u32 supported; - int err; spin_lock_irq(&np->lock); - err = mii_ethtool_get_link_ksettings(&np->mii_if, cmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); /* The PHY may support 1000baseT, but the Etrax100 does not.
*/ @@ -1432,7 +1431,7 @@ static int e100_get_link_ksettings(struct net_device *dev, ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); - return err; + return 0; } static int e100_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index e41245a54f8b..14cff6017756 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2912,7 +2912,9 @@ static int vortex_get_link_ksettings(struct net_device *dev, { struct vortex_private *vp = netdev_priv(dev); - return mii_ethtool_get_link_ksettings(&vp->mii, cmd); + mii_ethtool_get_link_ksettings(&vp->mii, cmd); + + return 0; } static int vortex_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 86369d7c9a0f..7f60d17819ce 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -731,12 +731,10 @@ static int pcnet32_get_link_ksettings(struct net_device *dev, { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; - int r = -EOPNOTSUPP; spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); - r = 0; } else if (lp->chip_version == PCNET32_79C970A) { if (lp->autoneg) { cmd->base.autoneg = AUTONEG_ENABLE; @@ -753,10 +751,9 @@ static int pcnet32_get_link_ksettings(struct net_device *dev, ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.supported, SUPPORTED_TP | SUPPORTED_AUI); - r = 0; } spin_unlock_irqrestore(&lp->lock, flags); - return r; + return 0; } static int pcnet32_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 7a7c02f1f8b9..e2a702996db4 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -702,7 +702,10 @@ static int ep93xx_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct ep93xx_priv *ep = netdev_priv(dev); - return mii_ethtool_get_link_ksettings(&ep->mii, cmd); + + mii_ethtool_get_link_ksettings(&ep->mii, cmd); + + return 0; } static int ep93xx_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index d1f2f3cc7cfa..32d7229544fa 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1395,13 +1395,12 @@ static int netdev_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); - int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); - return rc; + return 0; } static int netdev_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 1536356e2ea8..66928a922824 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -829,7 +829,10 @@ static int ftmac100_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_get_link_ksettings(&priv->mii, cmd); + + mii_ethtool_get_link_ksettings(&priv->mii, cmd); + + return 0; } static int ftmac100_set_link_ksettings(struct net_device *netdev, diff --git 
a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 766636a7c25e..610f9c07c21d 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1821,13 +1821,12 @@ static int netdev_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); - int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_get_link_ksettings(&np->mii, cmd); + mii_ethtool_get_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); - return rc; + return 0; } static int netdev_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 2b7323d392dc..4d10270ddf8f 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2430,7 +2430,10 @@ static int e100_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); - return mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; } static int e100_set_link_ksettings(struct net_device *netdev, diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 0e5083a48937..62d848df26ef 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2610,12 +2610,11 @@ jme_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); - int rc; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); + mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); - return rc; + return 0; } static int diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 9fae98caf83a..3c0a6451273d 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -699,13 +699,12 @@ static int netdev_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); - int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); + mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); - return rc; + return 0; } static int netdev_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 20358f87de57..2fe96f1f3fe5 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -1071,7 +1071,10 @@ static int ks8851_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct ks8851_net *ks = netdev_priv(dev); - return mii_ethtool_get_link_ksettings(&ks->mii, cmd); + + mii_ethtool_get_link_ksettings(&ks->mii, cmd); + + return 0; } static int ks8851_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 7647f7bdbcb8..f3e9dd47b56f 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1315,7 +1315,10 @@ static int ks_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ks_net *ks = netdev_priv(netdev); - return mii_ethtool_get_link_ksettings(&ks->mii, cmd); + + mii_ethtool_get_link_ksettings(&ks->mii, cmd); + + return 0; } static int ks_set_link_ksettings(struct net_device *netdev, diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c 
b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 159564d8dcdb..89ab786da25f 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -868,7 +868,10 @@ static int w90p910_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct w90p910_ether *ether = netdev_priv(dev); - return mii_ethtool_get_link_ksettings(ðer->mii, cmd); + + mii_ethtool_get_link_ksettings(ðer->mii, cmd); + + return 0; } static int w90p910_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 21093276d2b7..731ce1e419e4 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -85,9 +85,8 @@ static int pch_gbe_get_link_ksettings(struct net_device *netdev, { struct pch_gbe_adapter *adapter = netdev_priv(netdev); u32 supported, advertising; - int ret; - ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd); + mii_ethtool_get_link_ksettings(&adapter->mii, ecmd); ethtool_convert_link_mode_to_legacy_u32(&supported, ecmd->link_modes.supported); @@ -104,7 +103,8 @@ static int pch_gbe_get_link_ksettings(struct net_device *netdev, if (!netif_carrier_ok(adapter->netdev)) ecmd->base.speed = SPEED_UNKNOWN; - return ret; + + return 0; } /** diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 72233ab9474b..e7ab23e87de2 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1410,14 +1410,13 @@ static int cp_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct cp_private *cp = netdev_priv(dev); - int rc; unsigned long flags; spin_lock_irqsave(&cp->lock, flags); - rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd); + mii_ethtool_get_link_ksettings(&cp->mii_if, cmd); spin_unlock_irqrestore(&cp->lock, flags); - return rc; + return 0; } static int cp_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0a8f2817ea60..bd07a15d3b7c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2148,7 +2148,9 @@ static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, { struct rtl8169_private *tp = netdev_priv(dev); - return mii_ethtool_get_link_ksettings(&tp->mii, cmd); + mii_ethtool_get_link_ksettings(&tp->mii, cmd); + + return 0; } static int rtl8169_get_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 52ead5524de7..b607936e1b3e 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1562,13 +1562,12 @@ static int ioc3_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct ioc3_private *ip = netdev_priv(dev); - int rc; spin_lock_irq(&ip->ioc3_lock); - rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd); + mii_ethtool_get_link_ksettings(&ip->mii, cmd); spin_unlock_irq(&ip->ioc3_lock); - return rc; + return 0; } static int ioc3_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 02da106c6e04..445109bd6910 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1739,7 +1739,9 @@ static int sis190_get_link_ksettings(struct net_device *dev, { 
struct sis190_private *tp = netdev_priv(dev); - return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd); + mii_ethtool_get_link_ksettings(&tp->mii_if, cmd); + + return 0; } static int sis190_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index db6dcb06193d..6a0e1d4b597c 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1391,13 +1391,12 @@ static int netdev_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct epic_private *np = netdev_priv(dev); - int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_get_link_ksettings(&np->mii, cmd); + mii_ethtool_get_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); - return rc; + return 0; } static int netdev_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 36307d34f641..05157442a980 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1450,7 +1450,7 @@ smc911x_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct smc911x_local *lp = netdev_priv(dev); - int ret, status; + int status; unsigned long flags; u32 supported; @@ -1458,7 +1458,7 @@ smc911x_ethtool_get_link_ksettings(struct net_device *dev, if (lp->phy_type != 0) { spin_lock_irqsave(&lp->lock, flags); - ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd); + mii_ethtool_get_link_ksettings(&lp->mii, cmd); spin_unlock_irqrestore(&lp->lock, flags); } else { supported = SUPPORTED_10baseT_Half | @@ -1480,10 +1480,9 @@ smc911x_ethtool_get_link_ksettings(struct net_device *dev, ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.supported, supported); - ret = 0; } - return ret; + return 0; } static int diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 976aa876789a..92c927aec66d 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1843,8 +1843,8 @@ static int smc_link_ok(struct net_device *dev) } } -static int smc_netdev_get_ecmd(struct net_device *dev, - struct ethtool_link_ksettings *ecmd) +static void smc_netdev_get_ecmd(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { u16 tmp; unsigned int ioaddr = dev->base_addr; @@ -1865,8 +1865,6 @@ static int smc_netdev_get_ecmd(struct net_device *dev, ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, supported); - - return 0; } static int smc_netdev_set_ecmd(struct net_device *dev, @@ -1918,18 +1916,17 @@ static int smc_get_link_ksettings(struct net_device *dev, struct smc_private *smc = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; u16 saved_bank = inw(ioaddr + BANK_SELECT); - int ret; unsigned long flags; spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) - ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd); else - ret = smc_netdev_get_ecmd(dev, ecmd); + smc_netdev_get_ecmd(dev, ecmd); SMC_SELECT_BANK(saved_bank); spin_unlock_irqrestore(&smc->lock, flags); - return ret; + return 0; } static int smc_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 91e9bd7159ab..0d230b125c6c 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1539,11 +1539,10 @@ 
smc_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct smc_local *lp = netdev_priv(dev); - int ret; if (lp->phy_type != 0) { spin_lock_irq(&lp->lock); - ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd); + mii_ethtool_get_link_ksettings(&lp->mii, cmd); spin_unlock_irq(&lp->lock); } else { u32 supported = SUPPORTED_10baseT_Half | @@ -1562,11 +1561,9 @@ smc_ethtool_get_link_ksettings(struct net_device *dev, ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.supported, supported); - - ret = 0; } - return ret; + return 0; } static int diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 5ac6eaa9e785..c2d15d9c0c33 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1504,13 +1504,12 @@ static int tsi108_get_link_ksettings(struct net_device *dev, { struct tsi108_prv_data *data = netdev_priv(dev); unsigned long flags; - int rc; spin_lock_irqsave(&data->txlock, flags); - rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd); + mii_ethtool_get_link_ksettings(&data->mii_if, cmd); spin_unlock_irqrestore(&data->txlock, flags); - return rc; + return 0; } static int tsi108_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 4cf41f779d0e..acd29d60174a 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2307,13 +2307,12 @@ static int netdev_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct rhine_private *rp = netdev_priv(dev); - int rc; mutex_lock(&rp->task_lock); - rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd); + mii_ethtool_get_link_ksettings(&rp->mii_if, cmd); mutex_unlock(&rp->task_lock); - return rc; + return 0; } static int netdev_set_link_ksettings(struct net_device *dev, diff --git a/drivers/net/mii.c b/drivers/net/mii.c index 6d953c53eed6..44612122338b 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -141,11 +141,9 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) * * The @cmd parameter is expected to have been cleared before calling * mii_ethtool_get_link_ksettings(). - * - * Returns 0 for success, negative on error. 
*/ -int mii_ethtool_get_link_ksettings(struct mii_if_info *mii, - struct ethtool_link_ksettings *cmd) +void mii_ethtool_get_link_ksettings(struct mii_if_info *mii, + struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; @@ -227,8 +225,6 @@ int mii_ethtool_get_link_ksettings(struct mii_if_info *mii, lp_advertising); /* ignore maxtxpkt, maxrxpkt for now */ - - return 0; } /** diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 51cf60092a18..793ce900dffa 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -624,7 +624,10 @@ static int ax88179_get_link_ksettings(struct net_device *net, struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); - return mii_ethtool_get_link_ksettings(&dev->mii, cmd); + + mii_ethtool_get_link_ksettings(&dev->mii, cmd); + + return 0; } static int ax88179_set_link_ksettings(struct net_device *net, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index e902df9595b9..fd31fab2a9da 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3835,7 +3835,7 @@ int rtl8152_get_link_ksettings(struct net_device *netdev, mutex_lock(&tp->control); - ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd); + mii_ethtool_get_link_ksettings(&tp->mii, cmd); mutex_unlock(&tp->control); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 79048e72c1bd..6510e5cc1817 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -956,7 +956,9 @@ int usbnet_get_link_ksettings(struct net_device *net, if (!dev->mii.mdio_read) return -EOPNOTSUPP; - return mii_ethtool_get_link_ksettings(&dev->mii, cmd); + mii_ethtool_get_link_ksettings(&dev->mii, cmd); + + return 0; } EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings); diff --git a/include/linux/mii.h b/include/linux/mii.h index 1629a0c32679..e870bfa6abfe 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -31,7 +31,7 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); -extern int mii_ethtool_get_link_ksettings( +extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern int mii_ethtool_set_link_ksettings( -- cgit v1.2.3 From e25ea21ffa66a029acfa89d2611c0e7ef23e7d8c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 6 Jun 2017 14:12:02 +0200 Subject: net: sched: introduce a TRAP control action There is a need to instruct the HW offloaded path to push certain matched packets to the CPU/kernel for further analysis. So this patch introduces a new TRAP control action to TC. For the kernel datapath, this action does not make much sense. So, with the same logic as in HW, the new TRAP behaves similarly to STOLEN. The skb is just dropped in the datapath (and virtually ejected to an upper level, which does not exist in the case of the kernel). Signed-off-by: Jiri Pirko Reviewed-by: Yotam Gigi Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_cls.h | 7 +++++++ net/core/dev.c | 2 ++ net/sched/cls_bpf.c | 1 + net/sched/sch_atm.c | 1 + net/sched/sch_cbq.c | 1 + net/sched/sch_drr.c | 1 + net/sched/sch_dsmark.c | 1 + net/sched/sch_fq_codel.c | 1 + net/sched/sch_hfsc.c | 1 + net/sched/sch_htb.c | 1 + net/sched/sch_multiq.c | 1 + net/sched/sch_prio.c | 1 + net/sched/sch_qfq.c | 1 + net/sched/sch_sfb.c | 1 + net/sched/sch_sfq.c | 1 + 15 files changed, 22 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index edf43ddf47b0..2055783e6ee9 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -37,6 +37,13 @@ enum { #define TC_ACT_QUEUED 5 #define TC_ACT_REPEAT 6 #define TC_ACT_REDIRECT 7 +#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu" + * and don't further process the frame + * in hardware. For sw path, this is + * equivalent of TC_ACT_STOLEN - drop + * the skb and act like everything + * is alright. + */ /* There is a special kind of actions called "extended actions", * which need a value parameter. These have a local opcode located in diff --git a/net/core/dev.c b/net/core/dev.c index 06e0a7492df8..8f72f4a9c6ac 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3269,6 +3269,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *ret = NET_XMIT_SUCCESS; consume_skb(skb); return NULL; @@ -4038,6 +4039,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: consume_skb(skb); return NULL; case TC_ACT_REDIRECT: diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 5ebeae996e63..a9c56ad4533a 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -70,6 +70,7 @@ static int cls_bpf_exec_opcode(int code) case TC_ACT_OK: case TC_ACT_SHOT: case TC_ACT_STOLEN: + case TC_ACT_TRAP: case TC_ACT_REDIRECT: case TC_ACT_UNSPEC: return code; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index f435546c3864..de162592eee0 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -406,6 +406,7 @@ done: switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: __qdisc_drop(skb, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 8dd6d0aca678..481036f6b54e 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -254,6 +254,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 5db2a2843c66..a413dc1c2098 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -339,6 +339,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 7ccdd825d34e..6d94fcc3592a 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -243,6 +243,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, #ifdef CONFIG_NET_CLS_ACT case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: __qdisc_drop(skb, to_free); return 
NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index f201e73947fb..337f2d6d81e4 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -103,6 +103,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return 0; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index a324f84b1ccd..b52f74610dc7 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1155,6 +1155,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 195bbca9eb0b..203286ab4427 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -238,6 +238,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 604767482ad0..f143b7bbaa0d 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -52,6 +52,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) switch (err) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index a2404688dd01..e3e364cc9a70 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -48,6 +48,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) switch (err) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 076ad032befb..0e16dfda0bd7 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -726,6 +726,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 9756b1ccd345..11fb6ec878d6 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -266,6 +266,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return false; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 66dfd15b7946..f80ea2cc5f1f 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return 0; -- cgit v1.2.3 From 5a4d1fee2f844813cb2092b7a06b0e20ed9e2fa4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 6 Jun 2017 14:12:03 +0200 Subject: net: sched: introduce helper to identify gact trap action Introduce a helper called is_tcf_gact_trap which could be used to tell if the action is gact trap or not. 
Signed-off-by: Jiri Pirko Reviewed-by: Yotam Gigi Signed-off-by: David S. Miller --- include/net/tc_act/tc_gact.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index b6f173910226..d576374c4d6f 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -15,7 +15,7 @@ struct tcf_gact { }; #define to_gact(a) ((struct tcf_gact *)a) -static inline bool is_tcf_gact_shot(const struct tc_action *a) +static inline bool __is_tcf_gact_act(const struct tc_action *a, int act) { #ifdef CONFIG_NET_CLS_ACT struct tcf_gact *gact; @@ -24,10 +24,21 @@ static inline bool is_tcf_gact_shot(const struct tc_action *a) return false; gact = to_gact(a); - if (gact->tcf_action == TC_ACT_SHOT) + if (gact->tcf_action == act) return true; #endif return false; } + +static inline bool is_tcf_gact_shot(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_SHOT); +} + +static inline bool is_tcf_gact_trap(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_TRAP); +} + #endif /* __NET_TC_GACT_H */ -- cgit v1.2.3 From f8fe99754673719ab791713a676bf27dae616fbc Mon Sep 17 00:00:00 2001 From: "yuval.shaia@oracle.com" Date: Mon, 5 Jun 2017 10:18:40 +0300 Subject: net: phy: Delete unused function phy_ethtool_gset It's unused, so remove it. Signed-off-by: Yuval Shaia Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- Documentation/networking/phy.txt | 1 - drivers/net/phy/phy.c | 24 ------------------------ include/linux/phy.h | 1 - 3 files changed, 26 deletions(-) (limited to 'include') diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt index 16f90d817224..bdec0f700bc1 100644 --- a/Documentation/networking/phy.txt +++ b/Documentation/networking/phy.txt @@ -295,7 +295,6 @@ Doing it all yourself settings in the PHY. int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); - int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); Ethtool convenience functions. diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 82ab8fb82587..40f4c6a2ef6c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -484,30 +484,6 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, } EXPORT_SYMBOL(phy_ethtool_ksettings_set); -int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) -{ - cmd->supported = phydev->supported; - - cmd->advertising = phydev->advertising; - cmd->lp_advertising = phydev->lp_advertising; - - ethtool_cmd_speed_set(cmd, phydev->speed); - cmd->duplex = phydev->duplex; - if (phydev->interface == PHY_INTERFACE_MODE_MOCA) - cmd->port = PORT_BNC; - else - cmd->port = PORT_MII; - cmd->phy_address = phydev->mdio.addr; - cmd->transceiver = phy_is_internal(phydev) ? 
- XCVR_INTERNAL : XCVR_EXTERNAL; - cmd->autoneg = phydev->autoneg; - cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl; - cmd->eth_tp_mdix = phydev->mdix; - - return 0; -} -EXPORT_SYMBOL(phy_ethtool_gset); - int phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd) { diff --git a/include/linux/phy.h b/include/linux/phy.h index 58f1b45a4c44..748e526c0698 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -854,7 +854,6 @@ void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); void phy_trigger_machine(struct phy_device *phydev, bool sync); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); -int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); int phy_ethtool_ksettings_set(struct phy_device *phydev, -- cgit v1.2.3 From dc4bb0e2356149aee4cdae061936f3bbdd45595c Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:46 -0700 Subject: bpf: Introduce bpf_prog ID This patch generates a unique ID for each BPF_PROG_LOAD-ed prog. It is worth noting that each BPF_PROG_LOAD-ed prog will have a different ID even if they have the same bpf instructions. The ID is generated by the existing idr_alloc_cyclic(). The ID range is [1, INT_MAX). It is allocated in a cyclic manner, so an ID will only get reused after 2 billion BPF_PROG_LOAD calls. The bpf_prog_alloc_id() is done after bpf_prog_select_runtime() because the jit process may have allocated a new prog. Hence, we need to ensure the value of the pointer 'prog' will not change any more before storing the prog in the prog_idr. After bpf_prog_select_runtime(), the prog is read-only. Hence, the id is stored in 'struct bpf_prog_aux'. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/bpf.h | 1 + kernel/bpf/syscall.c | 40 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index fcc80ca11045..c5946d19f2ca 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -172,6 +172,7 @@ struct bpf_prog_aux { u32 used_map_cnt; u32 max_ctx_offset; u32 stack_depth; + u32 id; struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 59da103adb85..2a1b32b470f1 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -22,8 +22,11 @@ #include #include #include +#include DEFINE_PER_CPU(int, bpf_prog_active); +static DEFINE_IDR(prog_idr); +static DEFINE_SPINLOCK(prog_idr_lock); int sysctl_unprivileged_bpf_disabled __read_mostly; @@ -650,6 +653,34 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog) free_uid(user); } +static int bpf_prog_alloc_id(struct bpf_prog *prog) +{ + int id; + + spin_lock_bh(&prog_idr_lock); + id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); + if (id > 0) + prog->aux->id = id; + spin_unlock_bh(&prog_idr_lock); + + /* id is in [1, INT_MAX) */ + if (WARN_ON_ONCE(!id)) + return -ENOSPC; + + return id > 0 ? 0 : id; +} + +static void bpf_prog_free_id(struct bpf_prog *prog) +{ + /* cBPF to eBPF migrations are currently not in the idr store. 
*/ + if (!prog->aux->id) + return; + + spin_lock_bh(&prog_idr_lock); + idr_remove(&prog_idr, prog->aux->id); + spin_unlock_bh(&prog_idr_lock); +} + static void __bpf_prog_put_rcu(struct rcu_head *rcu) { struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); @@ -663,6 +694,7 @@ void bpf_prog_put(struct bpf_prog *prog) { if (atomic_dec_and_test(&prog->aux->refcnt)) { trace_bpf_prog_put_rcu(prog); + bpf_prog_free_id(prog); bpf_prog_kallsyms_del(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); } @@ -857,15 +889,21 @@ static int bpf_prog_load(union bpf_attr *attr) if (err < 0) goto free_used_maps; + err = bpf_prog_alloc_id(prog); + if (err) + goto free_used_maps; + err = bpf_prog_new_fd(prog); if (err < 0) /* failed to allocate fd */ - goto free_used_maps; + goto free_id; bpf_prog_kallsyms_add(prog); trace_bpf_prog_load(prog, err); return err; +free_id: + bpf_prog_free_id(prog); free_used_maps: free_used_maps(prog->aux); free_prog: -- cgit v1.2.3 From f3f1c054c288bb6e503005e6d73611151ed20e91 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:47 -0700 Subject: bpf: Introduce bpf_map ID This patch generates a unique ID for each created bpf_map. The approach is similar to the earlier patch for bpf_prog ID. It is worth noting that the bpf_map's ID and bpf_prog's ID are in two independent ID spaces and both have the same valid range: [1, INT_MAX). Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/bpf.h | 1 + kernel/bpf/syscall.c | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c5946d19f2ca..c32bace66d3d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -46,6 +46,7 @@ struct bpf_map { u32 max_entries; u32 map_flags; u32 pages; + u32 id; struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 2a1b32b470f1..4c3075b5d840 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -27,6 +27,8 @@ DEFINE_PER_CPU(int, bpf_prog_active); static DEFINE_IDR(prog_idr); static DEFINE_SPINLOCK(prog_idr_lock); +static DEFINE_IDR(map_idr); +static DEFINE_SPINLOCK(map_idr_lock); int sysctl_unprivileged_bpf_disabled __read_mostly; @@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map) free_uid(user); } +static int bpf_map_alloc_id(struct bpf_map *map) +{ + int id; + + spin_lock_bh(&map_idr_lock); + id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); + if (id > 0) + map->id = id; + spin_unlock_bh(&map_idr_lock); + + if (WARN_ON_ONCE(!id)) + return -ENOSPC; + + return id > 0 ? 
0 : id; +} + +static void bpf_map_free_id(struct bpf_map *map) +{ + spin_lock_bh(&map_idr_lock); + idr_remove(&map_idr, map->id); + spin_unlock_bh(&map_idr_lock); +} + /* called from workqueue */ static void bpf_map_free_deferred(struct work_struct *work) { @@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map) void bpf_map_put(struct bpf_map *map) { if (atomic_dec_and_test(&map->refcnt)) { + bpf_map_free_id(map); INIT_WORK(&map->work, bpf_map_free_deferred); schedule_work(&map->work); } @@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr) if (err) goto free_map_nouncharge; + err = bpf_map_alloc_id(map); + if (err) + goto free_map; + err = bpf_map_new_fd(map); if (err < 0) /* failed to allocate fd */ - goto free_map; + goto free_id; trace_bpf_map_create(map, err); return err; +free_id: + bpf_map_free_id(map); free_map: bpf_map_uncharge_memlock(map); free_map_nouncharge: -- cgit v1.2.3 From 34ad5580f8f9c86cb273ebea25c149613cd1667e Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:48 -0700 Subject: bpf: Add BPF_(PROG|MAP)_GET_NEXT_ID command This patch adds BPF_PROG_GET_NEXT_ID and BPF_MAP_GET_NEXT_ID to allow userspace to iterate all bpf_prog IDs and bpf_map IDs. The API is trying to be consistent with the existing BPF_MAP_GET_NEXT_KEY. It is currently limited to CAP_SYS_ADMIN, a restriction which we can consider lifting in followup patches. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 7 +++++++ kernel/bpf/syscall.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e78aece03628..629747a3f273 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -82,6 +82,8 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, + BPF_PROG_GET_NEXT_ID, + BPF_MAP_GET_NEXT_ID, }; enum bpf_map_type { @@ -209,6 +211,11 @@ union bpf_attr { __u32 repeat; __u32 duration; } test; + + struct { /* anonymous struct used by BPF_*_GET_NEXT_ID */ + __u32 start_id; + __u32 next_id; + }; } __attribute__((aligned(8))); /* BPF helper function descriptions: diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 4c3075b5d840..2405feedb8c1 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -166,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map) void bpf_map_put(struct bpf_map *map) { if (atomic_dec_and_test(&map->refcnt)) { + /* bpf_map_free_id() must be called first */ bpf_map_free_id(map); INIT_WORK(&map->work, bpf_map_free_deferred); schedule_work(&map->work); @@ -726,6 +727,7 @@ void bpf_prog_put(struct bpf_prog *prog) { if (atomic_dec_and_test(&prog->aux->refcnt)) { trace_bpf_prog_put_rcu(prog); + /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog); bpf_prog_kallsyms_del(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); @@ -1069,6 +1071,34 @@ static int bpf_prog_test_run(const union bpf_attr *attr, return ret; } +#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id + +static int bpf_obj_get_next_id(const union bpf_attr *attr, + union bpf_attr __user *uattr, + struct idr *idr, + spinlock_t *lock) +{ + u32 next_id = attr->start_id; + int err = 0; + + if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + next_id++; + spin_lock_bh(lock); + if (!idr_get_next(idr, &next_id)) + err = -ENOENT; + 
spin_unlock_bh(lock); + + if (!err) + err = put_user(next_id, &uattr->next_id); + + return err; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr = {}; @@ -1146,6 +1176,14 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_PROG_TEST_RUN: err = bpf_prog_test_run(&attr, uattr); break; + case BPF_PROG_GET_NEXT_ID: + err = bpf_obj_get_next_id(&attr, uattr, + &prog_idr, &prog_idr_lock); + break; + case BPF_MAP_GET_NEXT_ID: + err = bpf_obj_get_next_id(&attr, uattr, + &map_idr, &map_idr_lock); + break; default: err = -EINVAL; break; -- cgit v1.2.3 From b16d9aa4c2b90af8d2c3201e245150f8c430c3bc Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:49 -0700 Subject: bpf: Add BPF_PROG_GET_FD_BY_ID Add a BPF_PROG_GET_FD_BY_ID command to allow the user to get an fd from a bpf_prog's ID. bpf_prog_inc_not_zero() is added and is called with prog_idr_lock held. __bpf_prog_put() is also added, which has the 'bool do_idr_lock' param to decide whether the prog_idr_lock should be acquired when freeing the prog->id. In the error path of bpf_prog_inc_not_zero(), it may have to call __bpf_prog_put(prog, false) which does not need to take the prog_idr_lock when freeing the prog->id. It is currently limited to CAP_SYS_ADMIN, a restriction which we can consider lifting in followup patches. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 8 +++-- kernel/bpf/syscall.c | 91 ++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 87 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 629747a3f273..d70cfed19d5e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -84,6 +84,7 @@ enum bpf_cmd { BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, + BPF_PROG_GET_FD_BY_ID, }; enum bpf_map_type { @@ -212,8 +213,11 @@ union bpf_attr { __u32 duration; } test; - struct { /* anonymous struct used by BPF_*_GET_NEXT_ID */ - __u32 start_id; + struct { /* anonymous struct used by BPF_*_GET_*_ID */ + union { + __u32 start_id; + __u32 prog_id; + }; __u32 next_id; }; } __attribute__((aligned(8))); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 2405feedb8c1..dc6253bb8ebb 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -703,15 +703,23 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog) return id > 0 ? 0 : id; } -static void bpf_prog_free_id(struct bpf_prog *prog) +static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) { /* cBPF to eBPF migrations are currently not in the idr store. 
*/ if (!prog->aux->id) return; - spin_lock_bh(&prog_idr_lock); + if (do_idr_lock) + spin_lock_bh(&prog_idr_lock); + else + __acquire(&prog_idr_lock); + idr_remove(&prog_idr, prog->aux->id); - spin_unlock_bh(&prog_idr_lock); + + if (do_idr_lock) + spin_unlock_bh(&prog_idr_lock); + else + __release(&prog_idr_lock); } static void __bpf_prog_put_rcu(struct rcu_head *rcu) @@ -723,16 +731,21 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) bpf_prog_free(aux->prog); } -void bpf_prog_put(struct bpf_prog *prog) +static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) { if (atomic_dec_and_test(&prog->aux->refcnt)) { trace_bpf_prog_put_rcu(prog); /* bpf_prog_free_id() must be called first */ - bpf_prog_free_id(prog); + bpf_prog_free_id(prog, do_idr_lock); bpf_prog_kallsyms_del(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); } } + +void bpf_prog_put(struct bpf_prog *prog) +{ + __bpf_prog_put(prog, true); +} EXPORT_SYMBOL_GPL(bpf_prog_put); static int bpf_prog_release(struct inode *inode, struct file *filp) @@ -814,6 +827,24 @@ struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) } EXPORT_SYMBOL_GPL(bpf_prog_inc); +/* prog_idr_lock should have been held */ +static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) +{ + int refold; + + refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0); + + if (refold >= BPF_MAX_REFCNT) { + __bpf_prog_put(prog, false); + return ERR_PTR(-EBUSY); + } + + if (!refold) + return ERR_PTR(-ENOENT); + + return prog; +} + static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type) { struct fd f = fdget(ufd); @@ -928,16 +959,21 @@ static int bpf_prog_load(union bpf_attr *attr) goto free_used_maps; err = bpf_prog_new_fd(prog); - if (err < 0) - /* failed to allocate fd */ - goto free_id; + if (err < 0) { + /* failed to allocate fd. + * bpf_prog_put() is needed because the above + * bpf_prog_alloc_id() has published the prog + * to the userspace and the userspace may + * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. 
+ */ + bpf_prog_put(prog); + return err; + } bpf_prog_kallsyms_add(prog); trace_bpf_prog_load(prog, err); return err; -free_id: - bpf_prog_free_id(prog); free_used_maps: free_used_maps(prog->aux); free_prog: @@ -1099,6 +1135,38 @@ static int bpf_obj_get_next_id(const union bpf_attr *attr, return err; } +#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id + +static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) +{ + struct bpf_prog *prog; + u32 id = attr->prog_id; + int fd; + + if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + spin_lock_bh(&prog_idr_lock); + prog = idr_find(&prog_idr, id); + if (prog) + prog = bpf_prog_inc_not_zero(prog); + else + prog = ERR_PTR(-ENOENT); + spin_unlock_bh(&prog_idr_lock); + + if (IS_ERR(prog)) + return PTR_ERR(prog); + + fd = bpf_prog_new_fd(prog); + if (fd < 0) + bpf_prog_put(prog); + + return fd; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr = {}; @@ -1184,6 +1252,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_obj_get_next_id(&attr, uattr, &map_idr, &map_idr_lock); break; + case BPF_PROG_GET_FD_BY_ID: + err = bpf_prog_get_fd_by_id(&attr); + break; default: err = -EINVAL; break; -- cgit v1.2.3 From bd5f5f4ecb78e2698dad655645b6d6a2f7012a8c Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:50 -0700 Subject: bpf: Add BPF_MAP_GET_FD_BY_ID Add a BPF_MAP_GET_FD_BY_ID command to allow the user to get an fd from a bpf_map's ID. bpf_map_inc_not_zero() is added and is called with map_idr_lock held. __bpf_map_put() is also added, which has the 'bool do_idr_lock' param to decide whether the map_idr_lock should be acquired when freeing the map->id. In the error path of bpf_map_inc_not_zero(), it may have to call __bpf_map_put(map, false) which does not need to take the map_idr_lock when freeing the map->id. It is currently limited to CAP_SYS_ADMIN, a restriction which we can consider lifting in followup patches. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 2 + kernel/bpf/syscall.c | 95 +++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 87 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d70cfed19d5e..dd23f47ff00c 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -85,6 +85,7 @@ enum bpf_cmd { BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, + BPF_MAP_GET_FD_BY_ID, }; enum bpf_map_type { @@ -217,6 +218,7 @@ union bpf_attr { union { __u32 start_id; __u32 prog_id; + __u32 map_id; }; __u32 next_id; }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index dc6253bb8ebb..1802bb9c47d9 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -135,11 +135,19 @@ static int bpf_map_alloc_id(struct bpf_map *map) return id > 0 ? 
0 : id; } -static void bpf_map_free_id(struct bpf_map *map) +static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) { - spin_lock_bh(&map_idr_lock); + if (do_idr_lock) + spin_lock_bh(&map_idr_lock); + else + __acquire(&map_idr_lock); + idr_remove(&map_idr, map->id); - spin_unlock_bh(&map_idr_lock); + + if (do_idr_lock) + spin_unlock_bh(&map_idr_lock); + else + __release(&map_idr_lock); } /* called from workqueue */ @@ -163,16 +171,21 @@ static void bpf_map_put_uref(struct bpf_map *map) /* decrement map refcnt and schedule it for freeing via workqueue * (unrelying map implementation ops->map_free() might sleep) */ -void bpf_map_put(struct bpf_map *map) +static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) { if (atomic_dec_and_test(&map->refcnt)) { /* bpf_map_free_id() must be called first */ - bpf_map_free_id(map); + bpf_map_free_id(map, do_idr_lock); INIT_WORK(&map->work, bpf_map_free_deferred); schedule_work(&map->work); } } +void bpf_map_put(struct bpf_map *map) +{ + __bpf_map_put(map, true); +} + void bpf_map_put_with_uref(struct bpf_map *map) { bpf_map_put_uref(map); @@ -271,15 +284,20 @@ static int map_create(union bpf_attr *attr) goto free_map; err = bpf_map_new_fd(map); - if (err < 0) - /* failed to allocate fd */ - goto free_id; + if (err < 0) { + /* failed to allocate fd. + * bpf_map_put() is needed because the above + * bpf_map_alloc_id() has published the map + * to the userspace and the userspace may + * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. + */ + bpf_map_put(map); + return err; + } trace_bpf_map_create(map, err); return err; -free_id: - bpf_map_free_id(map); free_map: bpf_map_uncharge_memlock(map); free_map_nouncharge: @@ -331,6 +349,28 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd) return map; } +/* map_idr_lock should have been held */ +static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, + bool uref) +{ + int refold; + + refold = __atomic_add_unless(&map->refcnt, 1, 0); + + if (refold >= BPF_MAX_REFCNT) { + __bpf_map_put(map, false); + return ERR_PTR(-EBUSY); + } + + if (!refold) + return ERR_PTR(-ENOENT); + + if (uref) + atomic_inc(&map->usercnt); + + return map; +} + int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) { return -ENOTSUPP; @@ -1167,6 +1207,38 @@ static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) return fd; } +#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id + +static int bpf_map_get_fd_by_id(const union bpf_attr *attr) +{ + struct bpf_map *map; + u32 id = attr->map_id; + int fd; + + if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + spin_lock_bh(&map_idr_lock); + map = idr_find(&map_idr, id); + if (map) + map = bpf_map_inc_not_zero(map, true); + else + map = ERR_PTR(-ENOENT); + spin_unlock_bh(&map_idr_lock); + + if (IS_ERR(map)) + return PTR_ERR(map); + + fd = bpf_map_new_fd(map); + if (fd < 0) + bpf_map_put(map); + + return fd; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr = {}; @@ -1255,6 +1327,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_PROG_GET_FD_BY_ID: err = bpf_prog_get_fd_by_id(&attr); break; + case BPF_MAP_GET_FD_BY_ID: + err = bpf_map_get_fd_by_id(&attr); + break; default: err = -EINVAL; break; -- cgit v1.2.3 From 783d28dd11f68fb25d1f2e0de7c42336394ef128 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:51 -0700 Subject: bpf: Add jited_len to struct bpf_prog Add jited_len 
to struct bpf_prog. It will be useful for the struct bpf_prog_info, which will be added in a later patch. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 1 + arch/powerpc/net/bpf_jit_comp64.c | 1 + arch/s390/net/bpf_jit_comp.c | 1 + arch/sparc/net/bpf_jit_comp_64.c | 1 + arch/x86/net/bpf_jit_comp.c | 1 + include/linux/filter.h | 1 + 6 files changed, 6 insertions(+) (limited to 'include') diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index b1d38eeb24f6..4f95873d7142 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -900,6 +900,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)ctx.image; prog->jited = 1; + prog->jited_len = image_size; out_off: kfree(ctx.offset); diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index a01366584a4b..861c5af1c9c4 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -1052,6 +1052,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) fp->bpf_func = (void *)image; fp->jited = 1; + fp->jited_len = alloclen; bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 42ad3832586c..01c6fbc3e85b 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1329,6 +1329,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) bpf_jit_binary_lock_ro(header); fp->bpf_func = (void *) jit.prg_buf; fp->jited = 1; + fp->jited_len = jit.size; free_addrs: kfree(jit.addrs); out: diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 098874a81f6e..8799ae9a8788 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1560,6 +1560,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog->bpf_func = (void *)ctx.image; prog->jited = 1; + prog->jited_len = image_size; out_off: kfree(ctx.offset); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 617eac9c4511..e1324f280e06 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1167,6 +1167,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)image; prog->jited = 1; + prog->jited_len = proglen; } else { prog = orig_prog; } diff --git a/include/linux/filter.h b/include/linux/filter.h index a20ba40fcb73..1e2dddf21f3b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -432,6 +432,7 @@ struct bpf_prog { kmemcheck_bitfield_end(meta); enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ + u32 jited_len; /* Size of jited insns in bytes */ u8 tag[BPF_TAG_SIZE]; struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ -- cgit v1.2.3 From 1e270976908686ec25fb91b8a34145be54137976 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Mon, 5 Jun 2017 12:15:52 -0700 Subject: bpf: Add BPF_OBJ_GET_INFO_BY_FD A single BPF_OBJ_GET_INFO_BY_FD cmd is used to obtain the info for both bpf_prog and bpf_map. The kernel can figure out whether the fd is associated with a bpf_prog or a bpf_map. The suggested struct bpf_prog_info and struct bpf_map_info are not meant to be complete lists, and completeness is not the goal of this patch. 
New fields can be added in the future patch. The focus of this patch is to create the interface, BPF_OBJ_GET_INFO_BY_FD cmd for exposing the bpf_prog's and bpf_map's info. The obj's info, which will be extended (and get bigger) over time, is separated from the bpf_attr to avoid bloating the bpf_attr. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/filter.h | 2 - include/uapi/linux/bpf.h | 28 ++++++++ kernel/bpf/syscall.c | 163 ++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 174 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index 1e2dddf21f3b..1fa26dc562ce 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -69,8 +69,6 @@ struct bpf_prog_aux; /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 -#define BPF_TAG_SIZE 8 - /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index dd23f47ff00c..9b2c10b45733 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -86,6 +86,7 @@ enum bpf_cmd { BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, BPF_MAP_GET_FD_BY_ID, + BPF_OBJ_GET_INFO_BY_FD, }; enum bpf_map_type { @@ -222,6 +223,12 @@ union bpf_attr { }; __u32 next_id; }; + + struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ + __u32 bpf_fd; + __u32 info_len; + __aligned_u64 info; + } info; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -686,4 +693,25 @@ struct xdp_md { __u32 data_end; }; +#define BPF_TAG_SIZE 8 + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[BPF_TAG_SIZE]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __aligned_u64 jited_prog_insns; + __aligned_u64 xlated_prog_insns; +} __attribute__((aligned(8))); + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; +} __attribute__((aligned(8))); + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1802bb9c47d9..8942c820d620 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1239,6 +1239,145 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr) return fd; } +static int check_uarg_tail_zero(void __user *uaddr, + size_t expected_size, + size_t actual_size) +{ + unsigned char __user *addr; + unsigned char __user *end; + unsigned char val; + int err; + + if (actual_size <= expected_size) + return 0; + + addr = uaddr + expected_size; + end = uaddr + actual_size; + + for (; addr < end; addr++) { + err = get_user(val, addr); + if (err) + return err; + if (val) + return -E2BIG; + } + + return 0; +} + +static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, + const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); + struct bpf_prog_info info = {}; + u32 info_len = attr->info.info_len; + char __user *uinsns; + u32 ulen; + int err; + + err = check_uarg_tail_zero(uinfo, sizeof(info), info_len); + if (err) + return err; + info_len = min_t(u32, sizeof(info), info_len); + + if (copy_from_user(&info, uinfo, info_len)) + return err; + + info.type = prog->type; + info.id = prog->aux->id; + + memcpy(info.tag, prog->tag, sizeof(prog->tag)); + + if (!capable(CAP_SYS_ADMIN)) { + info.jited_prog_len = 0; + info.xlated_prog_len = 
0; + goto done; + } + + ulen = info.jited_prog_len; + info.jited_prog_len = prog->jited_len; + if (info.jited_prog_len && ulen) { + uinsns = u64_to_user_ptr(info.jited_prog_insns); + ulen = min_t(u32, info.jited_prog_len, ulen); + if (copy_to_user(uinsns, prog->bpf_func, ulen)) + return -EFAULT; + } + + ulen = info.xlated_prog_len; + info.xlated_prog_len = bpf_prog_size(prog->len); + if (info.xlated_prog_len && ulen) { + uinsns = u64_to_user_ptr(info.xlated_prog_insns); + ulen = min_t(u32, info.xlated_prog_len, ulen); + if (copy_to_user(uinsns, prog->insnsi, ulen)) + return -EFAULT; + } + +done: + if (copy_to_user(uinfo, &info, info_len) || + put_user(info_len, &uattr->info.info_len)) + return -EFAULT; + + return 0; +} + +static int bpf_map_get_info_by_fd(struct bpf_map *map, + const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); + struct bpf_map_info info = {}; + u32 info_len = attr->info.info_len; + int err; + + err = check_uarg_tail_zero(uinfo, sizeof(info), info_len); + if (err) + return err; + info_len = min_t(u32, sizeof(info), info_len); + + info.type = map->map_type; + info.id = map->id; + info.key_size = map->key_size; + info.value_size = map->value_size; + info.max_entries = map->max_entries; + info.map_flags = map->map_flags; + + if (copy_to_user(uinfo, &info, info_len) || + put_user(info_len, &uattr->info.info_len)) + return -EFAULT; + + return 0; +} + +#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info + +static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + int ufd = attr->info.bpf_fd; + struct fd f; + int err; + + if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) + return -EINVAL; + + f = fdget(ufd); + if (!f.file) + return -EBADFD; + + if (f.file->f_op == &bpf_prog_fops) + err = bpf_prog_get_info_by_fd(f.file->private_data, attr, + uattr); + else if (f.file->f_op == &bpf_map_fops) + err = bpf_map_get_info_by_fd(f.file->private_data, attr, + uattr); + else + err = -EINVAL; + + fdput(f); + return err; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr = {}; @@ -1258,23 +1397,10 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz * user-space does not rely on any kernel feature * extensions we dont know about yet. 
*/ - if (size > sizeof(attr)) { - unsigned char __user *addr; - unsigned char __user *end; - unsigned char val; - - addr = (void __user *)uattr + sizeof(attr); - end = (void __user *)uattr + size; - - for (; addr < end; addr++) { - err = get_user(val, addr); - if (err) - return err; - if (val) - return -E2BIG; - } - size = sizeof(attr); - } + err = check_uarg_tail_zero(uattr, sizeof(attr), size); + if (err) + return err; + size = min_t(u32, size, sizeof(attr)); /* copy attributes from user space, may be less than sizeof(bpf_attr) */ if (copy_from_user(&attr, uattr, size) != 0) @@ -1330,6 +1456,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_MAP_GET_FD_BY_ID: err = bpf_map_get_fd_by_id(&attr); break; + case BPF_OBJ_GET_INFO_BY_FD: + err = bpf_obj_get_info_by_fd(&attr, uattr); + break; default: err = -EINVAL; break; -- cgit v1.2.3 From abb2ea7dfd82451d85ce669b811310c05ab5ca46 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 6 Jun 2017 13:36:24 -0700 Subject: compiler, clang: suppress warning for unused static inline functions GCC explicitly does not warn for unused static inline functions for -Wunused-function. The manual states: Warn whenever a static function is declared but not defined or a non-inline static function is unused. Clang does warn for static inline functions that are unused. It turns out that suppressing the warnings avoids potentially complex #ifdef directives, which also reduces LOC. Suppress the warning for clang. Signed-off-by: David Rientjes Signed-off-by: Linus Torvalds --- include/linux/compiler-clang.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index de179993e039..ea9126006a69 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -15,3 +15,10 @@ * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +/* + * GCC does not warn about unused static inline functions for + * -Wunused-function. This turns out to avoid the need for complex #ifdef + * directives. Suppress the warning in clang as well. + */ +#define inline inline __attribute__((unused)) -- cgit v1.2.3 From 5acde34a5a420ffe7441bb7d3909dc2618025c3c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Jun 2017 12:22:50 +0100 Subject: net: phy: add 802.3 clause 45 support to phylib Add generic helpers for 802.3 clause 45 PHYs for >= 10Gbps support. Reviewed-by: Andrew Lunn Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Makefile | 2 +- drivers/net/phy/phy-c45.c | 234 +++++++++++++++++++++++++++++++++++++++++++ drivers/net/phy/phy_device.c | 20 ++-- include/linux/phy.h | 12 +++ 4 files changed, 253 insertions(+), 15 deletions(-) create mode 100644 drivers/net/phy/phy-c45.c (limited to 'include') diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index e2fde094f63d..ae58f507aba9 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,6 +1,6 @@ # Makefile for Linux PHY drivers and MDIO bus drivers -libphy-y := phy.o phy-core.o phy_device.o +libphy-y := phy.o phy-c45.o phy-core.o phy_device.o mdio-bus-y += mdio_bus.o mdio_device.o ifdef CONFIG_MDIO_DEVICE diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c new file mode 100644 index 000000000000..d311d6e6141c --- /dev/null +++ b/drivers/net/phy/phy-c45.c @@ -0,0 +1,234 @@ +/* + * Clause 45 PHY support + */ +#include +#include +#include +#include +#include + +/** + * genphy_c45_setup_forced - configures a forced speed + * @phydev: target phy_device struct + */ +int genphy_c45_pma_setup_forced(struct phy_device *phydev) +{ + int ctrl1, ctrl2, ret; + + /* Half duplex is not supported */ + if (phydev->duplex != DUPLEX_FULL) + return -EINVAL; + + ctrl1 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1); + if (ctrl1 < 0) + return ctrl1; + + ctrl2 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2); + if (ctrl2 < 0) + return ctrl2; + + ctrl1 &= ~MDIO_CTRL1_SPEEDSEL; + /* + * PMA/PMD type selection is 1.7.5:0 not 1.7.3:0. See 45.2.1.6.1 + * in 802.3-2012 and 802.3-2015. + */ + ctrl2 &= ~(MDIO_PMA_CTRL2_TYPE | 0x30); + + switch (phydev->speed) { + case SPEED_10: + ctrl2 |= MDIO_PMA_CTRL2_10BT; + break; + case SPEED_100: + ctrl1 |= MDIO_PMA_CTRL1_SPEED100; + ctrl2 |= MDIO_PMA_CTRL2_100BTX; + break; + case SPEED_1000: + ctrl1 |= MDIO_PMA_CTRL1_SPEED1000; + /* Assume 1000base-T */ + ctrl2 |= MDIO_PMA_CTRL2_1000BT; + break; + case SPEED_10000: + ctrl1 |= MDIO_CTRL1_SPEED10G; + /* Assume 10Gbase-T */ + ctrl2 |= MDIO_PMA_CTRL2_10GBT; + break; + default: + return -EINVAL; + } + + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1); + if (ret < 0) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2); +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); + +/** + * genphy_c45_an_disable_aneg - disable auto-negotiation + * @phydev: target phy_device struct + * + * Disable auto-negotiation in the Clause 45 PHY. The link parameters + * parameters are controlled through the PMA/PMD MMD registers. + * + * Returns zero on success, negative errno code on failure. + */ +int genphy_c45_an_disable_aneg(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (val < 0) + return val; + + val &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); + + return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val); +} +EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); + +/** + * genphy_c45_restart_aneg - Enable and restart auto-negotiation + * @phydev: target phy_device struct + * + * This assumes that the auto-negotiation MMD is present. + * + * Enable and restart auto-negotiation. 
+ */ +int genphy_c45_restart_aneg(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (val < 0) + return val; + + val |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART; + + return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val); +} +EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); + +/** + * genphy_c45_aneg_done - return auto-negotiation complete status + * @phydev: target phy_device struct + * + * This assumes that the auto-negotiation MMD is present. + * + * Reads the status register from the auto-negotiation MMD, returning: + * - positive if auto-negotiation is complete + * - negative errno code on error + * - zero otherwise + */ +int genphy_c45_aneg_done(struct phy_device *phydev) +{ + int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + + return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 1 : 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_aneg_done); + +/** + * genphy_c45_read_link - read the overall link status from the MMDs + * @phydev: target phy_device struct + * @mmd_mask: MMDs to read status from + * + * Read the link status from the specified MMDs, and if they all indicate + * that the link is up, return positive. If an error is encountered, + * a negative errno will be returned, otherwise zero. + */ +int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask) +{ + int val, devad; + bool link = true; + + while (mmd_mask) { + devad = __ffs(mmd_mask); + mmd_mask &= ~BIT(devad); + + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * register if the link is down. + */ + val = phy_read_mmd(phydev, devad, MDIO_STAT1); + if (val < 0) + return val; + + if (!(val & MDIO_STAT1_LSTATUS)) + link = false; + } + + return link; +} +EXPORT_SYMBOL_GPL(genphy_c45_read_link); + +/** + * genphy_c45_read_lpa - read the link partner advertisment and pause + * @phydev: target phy_device struct + * + * Read the Clause 45 defined base (7.19) and 10G (7.33) status registers, + * filling in the link partner advertisment, pause and asym_pause members + * in @phydev. This assumes that the auto-negotiation MMD is present, and + * the backplane bit (7.48.0) is clear. Clause 45 PHY drivers are expected + * to fill in the remainder of the link partner advert from vendor registers. + */ +int genphy_c45_read_lpa(struct phy_device *phydev) +{ + int val; + + /* Read the link partner's base page advertisment */ + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); + if (val < 0) + return val; + + phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val); + phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; + phydev->asym_pause = val & LPA_PAUSE_ASYM ? 
1 : 0; + + /* Read the link partner's 10G advertisment */ + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT); + if (val < 0) + return val; + + if (val & MDIO_AN_10GBT_STAT_LP10G) + phydev->lp_advertising |= ADVERTISED_10000baseT_Full; + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_read_lpa); + +/** + * genphy_c45_read_pma - read link speed etc from PMA + * @phydev: target phy_device struct + */ +int genphy_c45_read_pma(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1); + if (val < 0) + return val; + + switch (val & MDIO_CTRL1_SPEEDSEL) { + case 0: + phydev->speed = SPEED_10; + break; + case MDIO_PMA_CTRL1_SPEED100: + phydev->speed = SPEED_100; + break; + case MDIO_PMA_CTRL1_SPEED1000: + phydev->speed = SPEED_1000; + break; + case MDIO_CTRL1_SPEED10G: + phydev->speed = SPEED_10000; + break; + default: + phydev->speed = SPEED_UNKNOWN; + break; + } + + phydev->duplex = DUPLEX_FULL; + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_read_pma); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 37a1e98908e3..6a79e24fa312 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1508,27 +1508,19 @@ EXPORT_SYMBOL(genphy_read_status); static int gen10g_read_status(struct phy_device *phydev) { - int devad, reg; u32 mmd_mask = phydev->c45_ids.devices_in_package; - - phydev->link = 1; + int ret; /* For now just lie and say it's 10G all the time */ phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; - for (devad = 0; mmd_mask; devad++, mmd_mask = mmd_mask >> 1) { - if (!(mmd_mask & 1)) - continue; + /* Avoid reading the vendor MMDs */ + mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2)); - /* Read twice because link state is latched and a - * read moves the current state into the register - */ - phy_read_mmd(phydev, devad, MDIO_STAT1); - reg = phy_read_mmd(phydev, devad, MDIO_STAT1); - if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) - phydev->link = 0; - } + ret = genphy_c45_read_link(phydev, mmd_mask); + + phydev->link = ret > 0 ? 1 : 0; return 0; } diff --git a/include/linux/phy.h b/include/linux/phy.h index 748e526c0698..a47eb5e841d2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -827,6 +827,8 @@ static inline const char *phydev_name(const struct phy_device *phydev) void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
__printf(2, 3); void phy_attached_info(struct phy_device *phydev); + +/* Clause 22 PHY */ int genphy_config_init(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); @@ -841,6 +843,16 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; } + +/* Clause 45 PHY */ +int genphy_c45_restart_aneg(struct phy_device *phydev); +int genphy_c45_aneg_done(struct phy_device *phydev); +int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask); +int genphy_c45_read_lpa(struct phy_device *phydev); +int genphy_c45_read_pma(struct phy_device *phydev); +int genphy_c45_pma_setup_forced(struct phy_device *phydev); +int genphy_c45_an_disable_aneg(struct phy_device *phydev); + void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); -- cgit v1.2.3 From 002ba7058a7f141cf22d37967a4ef78239c50e9e Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Jun 2017 12:23:00 +0100 Subject: net: phy: hook up clause 45 autonegotiation restart genphy_restart_aneg() can only restart autonegotiation on clause 22 PHYs. Add a phy_restart_aneg() function which selects between the clause 22 and clause 45 restart functionality depending on the PHY type and whether the Clause 45 PHY supports the Clause 22 register set. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 23 +++++++++++++++++++++-- include/linux/phy.h | 1 + 2 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c232ee04754b..12548e5b6037 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -149,6 +149,25 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) return 0; } +/** + * phy_restart_aneg - restart auto-negotiation + * @phydev: target phy_device struct + * + * Restart the autonegotiation on @phydev. Returns >= 0 on success or + * negative errno on error. + */ +int phy_restart_aneg(struct phy_device *phydev) +{ + int ret; + + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + ret = genphy_c45_restart_aneg(phydev); + else + ret = genphy_restart_aneg(phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_restart_aneg); /** * phy_aneg_done - return auto-negotiation status @@ -1397,7 +1416,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) /* Restart autonegotiation so the new modes get sent to the * link partner. 
From c125ca091873f2e848cc31c2371a3a66c2fd4dd8 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 5 Jun 2017 12:23:10 +0100
Subject: net: phy: add XAUI and 10GBASE-KR PHY connection types

XAUI allows XGMII to reach an extended distance by using an XGXS layer
at each end of the MAC to PHY link, operating over four Serdes lanes.

10GBASE-KR is a single lane Serdes backplane ethernet connection method
with autonegotiation on the link. Some PHYs use this to connect to the
ethernet interface at 10G speeds, switching to other connection types
when utilising slower speeds.

10GBASE-KR is also used for XFI and SFI to connect to XFP and SFP fiber
modules.

Signed-off-by: Russell King
Reviewed-by: Andrew Lunn
Reviewed-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 Documentation/ABI/testing/sysfs-class-net-phydev   |  2 +-
 Documentation/devicetree/bindings/net/ethernet.txt |  2 ++
 include/linux/phy.h                                |  7 +++++++
 3 files changed, 10 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/Documentation/ABI/testing/sysfs-class-net-phydev b/Documentation/ABI/testing/sysfs-class-net-phydev
index c768d5fd8496..6ebabfb27912 100644
--- a/Documentation/ABI/testing/sysfs-class-net-phydev
+++ b/Documentation/ABI/testing/sysfs-class-net-phydev
@@ -32,5 +32,5 @@ Description:
 		(not available), mii, gmii, sgmii, tbi, rev-mii, rmii, rgmii,
 		rgmii-id, rgmii-rxid, rgmii-txid, rtbi, smii, xgmii, moca,
 		qsgmii, trgmii, 1000base-x, 2500base-x, rxaui,
-		unknown
+		xaui, 10gbase-kr, unknown
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 3a6916909d90..d4abe9a98109 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -32,6 +32,8 @@ The following properties are common to the Ethernet controllers:
   * "2000base-x",
   * "2500base-x",
   * "rxaui"
+  * "xaui"
+  * "10gbase-kr" (10GBASE-KR, XFI, SFI)
 - phy-connection-type: the same as "phy-mode" property but described in ePAPR;
 - phy-handle: phandle, specifies a reference to a node representing a PHY
   device; this property is described in ePAPR and so preferred;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index b24de9ddc886..414242200a90 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -83,6 +83,9 @@ typedef enum {
 	PHY_INTERFACE_MODE_1000BASEX,
 	PHY_INTERFACE_MODE_2500BASEX,
 	PHY_INTERFACE_MODE_RXAUI,
+	PHY_INTERFACE_MODE_XAUI,
+	/* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
+	PHY_INTERFACE_MODE_10GKR,
 	PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;
 
@@ -149,6 +152,10 @@ static inline const char *phy_modes(phy_interface_t interface)
 		return "2500base-x";
 	case PHY_INTERFACE_MODE_RXAUI:
 		return "rxaui";
+	case PHY_INTERFACE_MODE_XAUI:
+		return "xaui";
+	case PHY_INTERFACE_MODE_10GKR:
+		return "10gbase-kr";
 	default:
 		return "unknown";
 	}
-- cgit v1.2.3
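
For illustration, a PHY driver wired for these new connection types might
reject anything it cannot drive at config_init time. This is a
hypothetical sketch, not code from the patch; the mydrv_ name is
invented:

#include <linux/phy.h>

/* Hypothetical sketch: refuse to bind in an interface mode the PHY
 * cannot support, reporting the mode by its new string name.
 */
static int mydrv_config_init(struct phy_device *phydev)
{
	if (phydev->interface != PHY_INTERFACE_MODE_XAUI &&
	    phydev->interface != PHY_INTERFACE_MODE_10GKR) {
		phydev_err(phydev, "unsupported interface mode \"%s\"\n",
			   phy_modes(phydev->interface));
		return -ENODEV;
	}

	return 0;
}
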