author     Jason Gunthorpe <jgg@nvidia.com>   2020-10-09 18:56:02 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>   2020-10-16 18:40:58 +0300
commit     16e7483e6f02973972f832b18042fd6c45fe26c0 (patch)
tree       205122d1996a983619ccd55c572c34f037a76718 /drivers/net/geneve.c
parent     bf6a47644ea0928b2a6589ba9fb1221116d8bfaf (diff)
parent     0c16d9635e3a51377e5815b9f8e14f497a4dbb42 (diff)
download   linux-16e7483e6f02973972f832b18042fd6c45fe26c0.tar.xz
Merge branch 'dynamic_sg' into rdma.git for-next
From Maor Gottlieb:

====================
This series extends __sg_alloc_table_from_pages to allow chaining of new
pages to an already initialized SG table. This allows drivers to use the
optimization of merging contiguous pages without having to preallocate all
the pages and hold them in a very large temporary buffer prior to the call
to SG table initialization.

The last patch changes the Infiniband core to use the new API. It removes
duplicate functionality from the code and benefits from the optimization of
allocating a dynamic SG table from pages.

On a system with 2MB huge pages, the SG table would otherwise contain 512x
as many entries: one per 4KB subpage instead of one per merged 2MB page.
====================

* branch 'dynamic_sg':
  RDMA/umem: Move to allocate SG table from pages
  lib/scatterlist: Add support in dynamic allocation of SG table from pages
  tools/testing/scatterlist: Show errors in human readable form
  tools/testing/scatterlist: Rejuvenate bit-rotten test
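A minimal sketch of the batched-chaining pattern the series describes, modeled on the RDMA/umem conversion: the helper name, batch size, and error-handling convention are illustrative, and the post-series __sg_alloc_table_from_pages() signature (returning the last SG entry and taking a prv/left_pages pair) is assumed from the series description.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper, not part of the series: append @npages pages to
 * @sgt in fixed-size batches. Feeding the last SG entry of the previous
 * call back in as @prv lets __sg_alloc_table_from_pages() chain each new
 * batch onto the existing table and keep merging contiguous pages, so no
 * single huge temporary page array is needed.
 */
static int build_sg_in_batches(struct sg_table *sgt, struct page **pages,
                               unsigned int npages, unsigned int max_seg)
{
        struct scatterlist *prv = NULL; /* NULL on the first call */
        unsigned int done = 0;

        while (done < npages) {
                unsigned int batch = min_t(unsigned int, npages - done, 512U);

                prv = __sg_alloc_table_from_pages(sgt, pages + done, batch, 0,
                                                  (unsigned long)batch << PAGE_SHIFT,
                                                  max_seg, prv,
                                                  npages - done - batch,
                                                  GFP_KERNEL);
                if (IS_ERR(prv))
                        return PTR_ERR(prv); /* assumed: caller frees with sg_free_table() */
                done += batch;
        }
        return 0;
}

Passing left_pages > 0 tells the allocator more pages will follow, so it leaves room to continue the chain on the next call.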
Diffstat (limited to 'drivers/net/geneve.c')
-rw-r--r--  drivers/net/geneve.c | 37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c71f994fbc73..974a244f45ba 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -777,7 +777,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct geneve_sock *gs4,
                                       struct flowi4 *fl4,
-                                      const struct ip_tunnel_info *info)
+                                      const struct ip_tunnel_info *info,
+                                      __be16 dport, __be16 sport)
{
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
        struct geneve_dev *geneve = netdev_priv(dev);
@@ -793,6 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
        fl4->flowi4_proto = IPPROTO_UDP;
        fl4->daddr = info->key.u.ipv4.dst;
        fl4->saddr = info->key.u.ipv4.src;
+       fl4->fl4_dport = dport;
+       fl4->fl4_sport = sport;

        tos = info->key.tos;
        if ((tos == 1) && !geneve->cfg.collect_md) {
@@ -827,7 +830,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
                                           struct net_device *dev,
                                           struct geneve_sock *gs6,
                                           struct flowi6 *fl6,
-                                          const struct ip_tunnel_info *info)
+                                          const struct ip_tunnel_info *info,
+                                          __be16 dport, __be16 sport)
{
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
        struct geneve_dev *geneve = netdev_priv(dev);
@@ -843,6 +847,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
        fl6->flowi6_proto = IPPROTO_UDP;
        fl6->daddr = info->key.u.ipv6.dst;
        fl6->saddr = info->key.u.ipv6.src;
+       fl6->fl6_dport = dport;
+       fl6->fl6_sport = sport;
+
        prio = info->key.tos;
        if ((prio == 1) && !geneve->cfg.collect_md) {
                prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
@@ -889,7 +896,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        __be16 sport;
        int err;

-       rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
+       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+                             geneve->cfg.info.key.tp_dst, sport);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
@@ -919,7 +928,6 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                return -EMSGSIZE;
        }

-       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->cfg.collect_md) {
                tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
                ttl = key->ttl;
@@ -974,7 +982,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        __be16 sport;
        int err;

-       dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
+       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+                               geneve->cfg.info.key.tp_dst, sport);
        if (IS_ERR(dst))
                return PTR_ERR(dst);
@@ -1003,7 +1013,6 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                return -EMSGSIZE;
        }

-       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->cfg.collect_md) {
                prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
                ttl = key->ttl;
@@ -1085,13 +1094,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
        struct geneve_dev *geneve = netdev_priv(dev);
+       __be16 sport;

        if (ip_tunnel_info_af(info) == AF_INET) {
                struct rtable *rt;
                struct flowi4 fl4;
+
                struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+               sport = udp_flow_src_port(geneve->net, skb,
+                                         1, USHRT_MAX, true);

-               rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
+               rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+                                     geneve->cfg.info.key.tp_dst, sport);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
@@ -1101,9 +1115,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        } else if (ip_tunnel_info_af(info) == AF_INET6) {
                struct dst_entry *dst;
                struct flowi6 fl6;
+
                struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+               sport = udp_flow_src_port(geneve->net, skb,
+                                         1, USHRT_MAX, true);

-               dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
+               dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+                                       geneve->cfg.info.key.tp_dst, sport);
                if (IS_ERR(dst))
                        return PTR_ERR(dst);
@@ -1114,8 +1132,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                return -EINVAL;
        }

-       info->key.tp_src = udp_flow_src_port(geneve->net, skb,
-                                            1, USHRT_MAX, true);
+       info->key.tp_src = sport;
        info->key.tp_dst = geneve->cfg.info.key.tp_dst;
        return 0;
}