author     Jakub Kicinski <kuba@kernel.org>  2020-10-30 00:08:40 +0300
committer  Jakub Kicinski <kuba@kernel.org>  2020-10-30 00:08:40 +0300
commit     1c29d9899081d090cbe2aab128e527354af7f343 (patch)
tree       83e3376286d1a808d3f73276750b558776c272ac /net
parent     cd29296fdfca919590e4004a7e4905544f4c4a32 (diff)
parent     07e0887302450a62f51dba72df6afb5fabb23d1c (diff)
download   linux-1c29d9899081d090cbe2aab128e527354af7f343.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c            |  4
-rw-r--r--  net/core/devlink.c        | 30
-rw-r--r--  net/ipv4/tcp.c            |  2
-rw-r--r--  net/ipv4/tcp_input.c      |  3
-rw-r--r--  net/mptcp/protocol.c      | 10
-rw-r--r--  net/rds/ib_cm.c           |  5
-rw-r--r--  net/sched/act_mpls.c      |  1
-rw-r--r--  net/sched/cls_api.c       |  4
-rw-r--r--  net/sched/sch_netem.c     |  9
-rw-r--r--  net/smc/af_smc.c          |  7
-rw-r--r--  net/smc/smc_core.c        |  7
-rw-r--r--  net/tipc/crypto.c         |  4
-rw-r--r--  net/tipc/msg.c            |  5
-rw-r--r--  net/vmw_vsock/af_vsock.c  |  2
14 files changed, 65 insertions, 28 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 55f66e108059..9e7f071b846c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -145,6 +145,7 @@
 #include <linux/indirect_call_wrapper.h>
 #include <net/devlink.h>
 #include <linux/pm_runtime.h>
+#include <linux/prandom.h>
 
 #include "net-sysfs.h"
 
@@ -3558,6 +3559,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
                 dev_queue_xmit_nit(skb, dev);
 
         len = skb->len;
+        PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
         trace_net_dev_start_xmit(skb, dev);
         rc = netdev_start_xmit(skb, dev, txq, more);
         trace_net_dev_xmit(skb, rc, dev, len);
@@ -4130,6 +4132,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
                         if (!skb)
                                 goto out;
 
+                        PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
                         HARD_TX_LOCK(dev, txq, cpu);
 
                         if (!netif_xmit_stopped(txq)) {
@@ -4195,6 +4198,7 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 
         skb_set_queue_mapping(skb, queue_id);
         txq = skb_get_tx_queue(dev, skb);
+        PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
 
         local_bh_disable();
 
diff --git a/net/core/devlink.c b/net/core/devlink.c
index a578634052a3..a932d95be798 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4213,10 +4213,12 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
         if (err)
                 goto nla_put_failure;
 
-        if (region->port)
-                if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
-                                region->port->index))
+        if (region->port) {
+                err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+                                  region->port->index);
+                if (err)
                         goto nla_put_failure;
+        }
 
         err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
         if (err)
@@ -4265,10 +4267,12 @@ devlink_nl_region_notify_build(struct devlink_region *region,
         if (err)
                 goto out_cancel_msg;
 
-        if (region->port)
-                if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
-                                region->port->index))
+        if (region->port) {
+                err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+                                  region->port->index);
+                if (err)
                         goto out_cancel_msg;
+        }
 
         err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
                              region->ops->name);
@@ -4915,8 +4919,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
                 index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
 
                 port = devlink_port_get_by_index(devlink, index);
-                if (!port)
-                        return -ENODEV;
+                if (!port) {
+                        err = -ENODEV;
+                        goto out_unlock;
+                }
         }
 
         region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
@@ -4962,10 +4968,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
         if (err)
                 goto nla_put_failure;
 
-        if (region->port)
-                if (nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
-                                region->port->index))
+        if (region->port) {
+                err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
+                                  region->port->index);
+                if (err)
                         goto nla_put_failure;
+        }
 
         err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
         if (err)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bae4284bf542..b2bc3d7fe9e8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -485,6 +485,8 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                         return true;
                 if (tcp_rmem_pressure(sk))
                         return true;
+                if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
+                        return true;
         }
         if (sk->sk_prot->stream_memory_read)
                 return sk->sk_prot->stream_memory_read(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fc445833b5e5..389d1b340248 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4908,7 +4908,8 @@ void tcp_data_ready(struct sock *sk)
         int avail = tp->rcv_nxt - tp->copied_seq;
 
         if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
-            !sock_flag(sk, SOCK_DONE))
+            !sock_flag(sk, SOCK_DONE) &&
+            tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
                 return;
 
         sk->sk_data_ready(sk);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 185dacb39781..e7419fd15d84 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -274,6 +274,15 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
         skb_ext_reset(skb);
         skb_orphan(skb);
 
+        /* try to fetch required memory from subflow */
+        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+                if (ssk->sk_forward_alloc < skb->truesize)
+                        goto drop;
+                __sk_mem_reclaim(ssk, skb->truesize);
+                if (!sk_rmem_schedule(sk, skb, skb->truesize))
+                        goto drop;
+        }
+
         /* the skb map_seq accounts for the skb offset:
          * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
          * value
@@ -301,6 +310,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
          * will retransmit as needed, if needed.
          */
         MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+drop:
         mptcp_drop(sk, skb);
         return false;
 }
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 06603dd1c8aa..b36b60668b1d 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
         rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
                                   conn->c_proposed_version,
                                   UINT_MAX, UINT_MAX, isv6);
-        ret = rdma_connect(cm_id, &conn_param);
+        ret = rdma_connect_locked(cm_id, &conn_param);
         if (ret)
-                rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
+                rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
+                                  ret);
 
 out:
         /* Beware - returning non-zero tells the rdma_cm to destroy
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index f40bf9771cb9..5c7456e5b5cf 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -426,6 +426,7 @@ static void __exit mpls_cleanup_module(void)
 module_init(mpls_init_module);
 module_exit(mpls_cleanup_module);
 
+MODULE_SOFTDEP("post: mpls_gso");
 MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MPLS manipulation actions");
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index faeabff283a2..838b3fd94d77 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -652,12 +652,12 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
                                block_cb->indr.binder_type,
                                &block->flow_block, tcf_block_shared(block),
                                &extack);
+        rtnl_lock();
         down_write(&block->cb_lock);
         list_del(&block_cb->driver_list);
         list_move(&block_cb->list, &bo.cb_list);
-        up_write(&block->cb_lock);
-        rtnl_lock();
         tcf_block_unbind(block, &bo);
+        up_write(&block->cb_lock);
         rtnl_unlock();
 }
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 84f82771cdf5..0c345e43a09a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -330,7 +330,7 @@ static s64 tabledist(s64 mu, s32 sigma,
 
         /* default uniform distribution */
         if (dist == NULL)
-                return ((rnd % (2 * sigma)) + mu) - sigma;
+                return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
 
         t = dist->table[rnd % dist->size];
         x = (sigma % NETEM_DIST_SCALE) * t;
@@ -812,6 +812,10 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
                 q->slot_config.max_packets = INT_MAX;
         if (q->slot_config.max_bytes == 0)
                 q->slot_config.max_bytes = INT_MAX;
+
+        /* capping dist_jitter to the range acceptable by tabledist() */
+        q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
+
         q->slot.packets_left = q->slot_config.max_packets;
         q->slot.bytes_left = q->slot_config.max_bytes;
         if (q->slot_config.min_delay | q->slot_config.max_delay |
@@ -1037,6 +1041,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
         if (tb[TCA_NETEM_SLOT])
                 get_slot(q, tb[TCA_NETEM_SLOT]);
 
+        /* capping jitter to the range acceptable by tabledist() */
+        q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
+
         return ret;
 
 get_table_failure:
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 82be0bd0f6e8..e9f487c8c6d5 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1317,10 +1317,10 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
 
 /* listen worker: decline and fall back if possible */
 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
-                               struct smc_init_info *ini, u8 version)
+                               int local_first, u8 version)
 {
         /* RDMA setup failed, switch back to TCP */
-        if (ini->first_contact_local)
+        if (local_first)
                 smc_lgr_cleanup_early(&new_smc->conn);
         else
                 smc_conn_free(&new_smc->conn);
@@ -1768,7 +1768,8 @@ static void smc_listen_work(struct work_struct *work)
 out_unlock:
         mutex_unlock(&smc_server_lgr_pending);
 out_decl:
-        smc_listen_decline(new_smc, rc, ini, version);
+        smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
+                           version);
 out_free:
         kfree(ini);
         kfree(buf);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index d790c43c473f..2b19863f7171 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1615,8 +1615,11 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
                 rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
                 if (rc) {
                         kfree(buf_desc);
-                        return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) :
-                                                 ERR_PTR(-EIO);
+                        if (rc == -ENOMEM)
+                                return ERR_PTR(-EAGAIN);
+                        if (rc == -ENOSPC)
+                                return ERR_PTR(-ENOSPC);
+                        return ERR_PTR(-EIO);
                 }
                 buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
                 /* CDC header stored in buf. So, pretend it was smaller */
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 40c44101fe8e..740ab9ae41a6 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -418,7 +418,7 @@ static void tipc_aead_free(struct rcu_head *rp)
                 kfree(head);
         }
         free_percpu(aead->tfm_entry);
-        kzfree(aead->key);
+        kfree_sensitive(aead->key);
         kfree(aead);
 }
 
@@ -2452,7 +2452,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
                 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
                 if (likely(rc > 0))
                         rc = tipc_crypto_key_distr(tx, rc, NULL);
-                kzfree(skey);
+                kfree_sensitive(skey);
         }
 
         if (unlikely(rc))
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 2a78aa701572..32c79c59052b 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -150,12 +150,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
         if (fragid == FIRST_FRAGMENT) {
                 if (unlikely(head))
                         goto err;
-                if (skb_cloned(frag))
-                        frag = skb_copy(frag, GFP_ATOMIC);
+                *buf = NULL;
+                frag = skb_unshare(frag, GFP_ATOMIC);
                 if (unlikely(!frag))
                         goto err;
                 head = *headbuf = frag;
-                *buf = NULL;
                 TIPC_SKB_CB(head)->tail = NULL;
                 if (skb_is_nonlinear(head)) {
                         skb_walk_frags(head, tail) {
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9e93bc201cc0..b4d7b8aba003 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -739,7 +739,7 @@ static struct sock *__vsock_create(struct net *net,
                 vsk->buffer_min_size = psk->buffer_min_size;
                 vsk->buffer_max_size = psk->buffer_max_size;
         } else {
-                vsk->trusted = capable(CAP_NET_ADMIN);
+                vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
                 vsk->owner = get_current_cred();
                 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
                 vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;