Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c                     2
-rw-r--r--  net/8021q/vlan_dev.c                      7
-rw-r--r--  net/bridge/br_multicast.c                 5
-rw-r--r--  net/core/dev.c                           11
-rw-r--r--  net/core/ethtool.c                       30
-rw-r--r--  net/core/flow.c                           4
-rw-r--r--  net/ethernet/eth.c                       21
-rw-r--r--  net/ipv4/ip_input.c                       7
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c                6
-rw-r--r--  net/ipv6/ndisc.c                          6
-rw-r--r--  net/irda/irlan/irlan_eth.c               31
-rw-r--r--  net/iucv/iucv.c                           2
-rw-r--r--  net/sched/sch_qfq.c                      85
-rw-r--r--  net/sunrpc/cache.c                       18
-rw-r--r--  net/sunrpc/clnt.c                        20
-rw-r--r--  net/sunrpc/rpc_pipe.c                    40
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_marshal.c   20
17 files changed, 165 insertions, 150 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8a15eaadc4bd..4a78c4de9f20 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
__be16 vlan_proto = skb->vlan_proto;
- u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+ u16 vlan_id = vlan_tx_tag_get_id(skb);
struct net_device *vlan_dev;
struct vlan_pcpu_stats *rx_stats;
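[Editor's note: vlan_tx_tag_get_id() is simply the VLAN_VID_MASK extraction the minus line open-coded, wrapped in a named helper. A minimal standalone sketch of the 802.1Q TCI layout it operates on, in plain C rather than kernel code (mask values are the standard 802.1Q ones):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK  0xe000  /* PCP, Priority Code Point (3 bits) */
#define VLAN_CFI_MASK   0x1000  /* DEI, formerly CFI (1 bit) */
#define VLAN_VID_MASK   0x0fff  /* VLAN Identifier (12 bits) */

static uint16_t tci_get_id(uint16_t tci)   { return tci & VLAN_VID_MASK; }
static uint16_t tci_get_prio(uint16_t tci) { return (tci & VLAN_PRIO_MASK) >> 13; }

int main(void)
{
	uint16_t tci = 0x6064;	/* PCP 3, VID 100 */

	printf("vid=%u prio=%u\n", tci_get_id(tci), tci_get_prio(tci));
	return 0;
}

A VID of 0 means the frame is only priority-tagged, which is why a dedicated "get the ID" helper reads better here than reusing the full TCI.]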
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3a8c8fd63c88..1cd3d2a406f5 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -73,6 +73,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
struct vlan_priority_tci_mapping *mp;
+ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
while (mp) {
if (mp->priority == skb->priority) {
@@ -249,6 +251,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
np->next = mp;
np->priority = skb_prio;
np->vlan_qos = vlan_qos;
+ /* Before inserting this element in hash table, make sure all its fields
+ * are committed to memory.
+ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+ */
+ smp_wmb();
vlan->egress_priority_map[skb_prio & 0xF] = np;
if (vlan_qos)
vlan->nr_egress_mappings++;
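[Editor's note: this is the classic lockless publish pattern: fully initialize the node, issue a write barrier, then make it reachable; readers pair the barrier with smp_rmb() before walking the list. A sketch of the same ordering expressed with C11 release/acquire atomics, as an analogy rather than the vlan structures themselves:

#include <stdatomic.h>
#include <stddef.h>

struct node {
	int priority;
	int vlan_qos;
	struct node *next;
};

static _Atomic(struct node *) head;

void publish(struct node *np, int prio, int qos)
{
	np->priority = prio;	/* fields written first ... */
	np->vlan_qos = qos;
	np->next = atomic_load_explicit(&head, memory_order_relaxed);
	/* ... then the node is made reachable; release orders the stores,
	 * like the smp_wmb() before the hash-table assignment above. */
	atomic_store_explicit(&head, np, memory_order_release);
}

int lookup_qos(int prio)
{
	/* acquire pairs with the release, like the reader's smp_rmb() */
	struct node *mp = atomic_load_explicit(&head, memory_order_acquire);

	for (; mp; mp = mp->next)
		if (mp->priority == prio)
			return mp->vlan_qos;
	return 0;
}

Without the barrier pair, a reader could observe the new hash-table pointer before the stores to np->priority and np->vlan_qos become visible.]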
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 69af490cce44..4b99c9a27044 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -619,6 +619,9 @@ rehash:
mp->br = br;
mp->addr = *group;
+ setup_timer(&mp->timer, br_multicast_group_expired,
+ (unsigned long)mp);
+
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
mdb->size++;
@@ -1126,7 +1129,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
if (!mp)
goto out;
- setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
mp->timer_armed = true;
@@ -1204,7 +1206,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (!mp)
goto out;
- setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
mp->timer_armed = true;
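[Editor's note: the point of moving setup_timer() to the allocation site is init-once/rearm-many: re-initializing a timer that may already be pending (as the query handlers did) can corrupt a live timer, whereas mod_timer() is safe to call on one. A condensed kernel-style sketch of the discipline, with a hypothetical struct rather than the bridge code:

#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

struct entry {
	struct timer_list timer;
};

static void entry_expired(unsigned long data)
{
	struct entry *e = (struct entry *)data;
	/* tear down e ... */
}

static struct entry *entry_create(void)
{
	struct entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

	if (e)	/* initialize exactly once, while nothing can fire it */
		setup_timer(&e->timer, entry_expired, (unsigned long)e);
	return e;
}

static void entry_refresh(struct entry *e, unsigned long interval)
{
	/* safe on an already-pending timer; setup_timer() here is not */
	mod_timer(&e->timer, jiffies + interval);
}
]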
diff --git a/net/core/dev.c b/net/core/dev.c
index a3d8d44cb7f4..26755dd40daa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3580,8 +3580,15 @@ ncls:
}
}
- if (vlan_tx_nonzero_tag_present(skb))
- skb->pkt_type = PACKET_OTHERHOST;
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ if (vlan_tx_tag_get_id(skb))
+ skb->pkt_type = PACKET_OTHERHOST;
+ /* Note: we might in the future use prio bits
+ * and set skb->priority like in vlan_do_receive()
+ * For the time being, just ignore Priority Code Point
+ */
+ skb->vlan_tci = 0;
+ }
/* deliver only exact match when indicated */
null_or_dev = deliver_exact ? skb->dev : NULL;
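[Editor's note: this hunk runs after vlan_do_receive() has had its chance, so a tag that is still present belongs to no local vlan device. The decision it adds, simplified into a standalone userspace sketch (assumption: sk_buff reduced to its two relevant fields): a non-zero VID means the frame is not for this host; VID 0 is merely priority-tagged and is treated as untagged; either way the TCI is cleared so the tag is not reprocessed.

#include <stdbool.h>
#include <stdint.h>

enum pkt_type { PACKET_HOST, PACKET_OTHERHOST };

static enum pkt_type classify_unclaimed_tag(bool tag_present, uint16_t tci,
					    uint16_t *tci_out)
{
	if (tag_present) {
		*tci_out = 0;		/* tag fully consumed here */
		if (tci & 0x0fff)	/* non-zero VID: not ours */
			return PACKET_OTHERHOST;
	}
	return PACKET_HOST;
}
]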
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index ab5fa6336c84..78e9d9223e40 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -279,11 +279,16 @@ static u32 __ethtool_get_flags(struct net_device *dev)
{
u32 flags = 0;
- if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO;
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN;
- if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN;
- if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE;
- if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH;
+ if (dev->features & NETIF_F_LRO)
+ flags |= ETH_FLAG_LRO;
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ flags |= ETH_FLAG_RXVLAN;
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ flags |= ETH_FLAG_TXVLAN;
+ if (dev->features & NETIF_F_NTUPLE)
+ flags |= ETH_FLAG_NTUPLE;
+ if (dev->features & NETIF_F_RXHASH)
+ flags |= ETH_FLAG_RXHASH;
return flags;
}
@@ -295,11 +300,16 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data)
if (data & ~ETH_ALL_FLAGS)
return -EINVAL;
- if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO;
- if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE;
- if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH;
+ if (data & ETH_FLAG_LRO)
+ features |= NETIF_F_LRO;
+ if (data & ETH_FLAG_RXVLAN)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (data & ETH_FLAG_TXVLAN)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (data & ETH_FLAG_NTUPLE)
+ features |= NETIF_F_NTUPLE;
+ if (data & ETH_FLAG_RXHASH)
+ features |= NETIF_F_RXHASH;
/* allow changing only bits set in hw_features */
changed = (features ^ dev->features) & ETH_ALL_FEATURES;
diff --git a/net/core/flow.c b/net/core/flow.c
index 7102f166482d..dfa602ceb8cd 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -403,7 +403,7 @@ void flow_cache_flush_deferred(void)
schedule_work(&flow_cache_flush_work);
}
-static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
+static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
@@ -421,7 +421,7 @@ static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
return 0;
}
-static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
+static int flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5359560926bc..be1f64d35358 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -401,27 +401,8 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
-static size_t _format_mac_addr(char *buf, int buflen,
- const unsigned char *addr, int len)
-{
- int i;
- char *cp = buf;
-
- for (i = 0; i < len; i++) {
- cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
- if (i == len - 1)
- break;
- cp += scnprintf(cp, buflen - (cp - buf), ":");
- }
- return cp - buf;
-}
-
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
- size_t l;
-
- l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
- l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
- return (ssize_t)l;
+ return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);
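[Editor's note: %*phC is a kernel vsnprintf extension that hex-dumps the number of bytes given by the '*' field width (capped at 64), separated by colons, so the whole helper collapses into one scnprintf(). A userspace sketch, since %ph does not exist outside the kernel, showing the output the new one-liner produces:

#include <stdio.h>

static int format_mac(char *buf, size_t size, const unsigned char *addr, int len)
{
	int n = 0, i;

	/* mirrors what the kernel's "%*phC" emits for len bytes */
	for (i = 0; i < len; i++)
		n += snprintf(buf + n, size - n, i ? ":%02x" : "%02x", addr[i]);
	return n;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	char buf[32];

	format_mac(buf, sizeof(buf), mac, 6);
	puts(buf);	/* prints: 00:1a:2b:3c:4d:5e */
	return 0;
}
]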
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3da817b89e9b..15e3e683adec 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -190,10 +190,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
- __skb_pull(skb, ip_hdrlen(skb));
-
- /* Point into the IP datagram, just past the header. */
- skb_reset_transport_header(skb);
+ __skb_pull(skb, skb_network_header_len(skb));
rcu_read_lock();
{
@@ -437,6 +434,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
goto drop;
}
+ skb->transport_header = skb->network_header + iph->ihl*4;
+
/* Remove any debris in the socket control block */
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
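[Editor's note: the transport header offset is now recorded once in ip_rcv() (network header plus ihl 32-bit words), so ip_local_deliver_finish() can pull skb_network_header_len() bytes instead of recomputing via ip_hdrlen() and resetting the transport header. A tiny sketch of the offset arithmetic, in plain C rather than sk_buff terms:

#include <assert.h>
#include <stdint.h>

/* the IPv4 ihl field counts 32-bit words, so the transport header
 * starts ihl * 4 bytes after the network header */
static uint16_t transport_offset(uint16_t network_offset, uint8_t ihl)
{
	return network_offset + ihl * 4;
}

int main(void)
{
	assert(transport_offset(14, 5) == 34);	/* 20-byte header after 14-byte Ethernet hdr */
	assert(transport_offset(14, 8) == 46);	/* with 12 bytes of IP options */
	return 0;
}
]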
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b2c123c44d69..610e324348d1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,6 +36,8 @@ static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
+static int tcp_syn_retries_min = 1;
+static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
@@ -332,7 +334,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_syn_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_syn_retries_min,
+ .extra2 = &tcp_syn_retries_max
},
{
.procname = "tcp_synack_retries",
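[Editor's note: proc_dointvec_minmax rejects writes outside [extra1, extra2], so tcp_syn_retries is now clamped to 1..MAX_TCP_SYNCNT instead of accepting any int; for example, echoing 0 or 200 into /proc/sys/net/ipv4/tcp_syn_retries now fails with EINVAL. A userspace sketch of the semantics (assumption: MAX_TCP_SYNCNT is 127 in this kernel):

#include <errno.h>

#define TCP_SYN_RETRIES_MIN	1
#define MAX_TCP_SYNCNT		127

static int validate_syn_retries(int val)
{
	if (val < TCP_SYN_RETRIES_MIN || val > MAX_TCP_SYNCNT)
		return -EINVAL;	/* write rejected, old value kept */
	return 0;
}
]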
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b3b5730b48c5..24c03396e008 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -479,7 +479,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
if (ifp) {
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
- override = 0;
+ override = false;
inc_opt |= ifp->idev->cnf.force_tllao;
in6_ifa_put(ifp);
} else {
@@ -557,7 +557,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
}
if (ipv6_addr_any(saddr))
- inc_opt = 0;
+ inc_opt = false;
if (inc_opt)
optlen += ndisc_opt_addr_space(dev);
@@ -790,7 +790,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
(is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb->pkt_type != PACKET_HOST &&
- inc != 0 &&
+ inc &&
idev->nd_parms->proxy_delay != 0) {
/*
* for anycast or proxy,
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index d14152e866d9..ffcec225b5d9 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -44,12 +44,12 @@ static int irlan_eth_open(struct net_device *dev);
static int irlan_eth_close(struct net_device *dev);
static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void irlan_eth_set_multicast_list( struct net_device *dev);
+static void irlan_eth_set_multicast_list(struct net_device *dev);
static const struct net_device_ops irlan_eth_netdev_ops = {
- .ndo_open = irlan_eth_open,
- .ndo_stop = irlan_eth_close,
- .ndo_start_xmit = irlan_eth_xmit,
+ .ndo_open = irlan_eth_open,
+ .ndo_stop = irlan_eth_close,
+ .ndo_start_xmit = irlan_eth_xmit,
.ndo_set_rx_mode = irlan_eth_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -110,7 +110,7 @@ static int irlan_eth_open(struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
- IRDA_DEBUG(2, "%s()\n", __func__ );
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Ready to play! */
netif_stop_queue(dev); /* Wait until data link is ready */
@@ -137,7 +137,7 @@ static int irlan_eth_close(struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
- IRDA_DEBUG(2, "%s()\n", __func__ );
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Stop device */
netif_stop_queue(dev);
@@ -310,35 +310,32 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
- IRDA_DEBUG(2, "%s()\n", __func__ );
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Check if data channel has been connected yet */
if (self->client.state != IRLAN_DATA) {
- IRDA_DEBUG(1, "%s(), delaying!\n", __func__ );
+ IRDA_DEBUG(1, "%s(), delaying!\n", __func__);
return;
}
if (dev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
- }
- else if ((dev->flags & IFF_ALLMULTI) ||
+ } else if ((dev->flags & IFF_ALLMULTI) ||
netdev_mc_count(dev) > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
- IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
+ IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
/* hardware_set_filter(NULL); */
irlan_set_multicast_filter(self, TRUE);
- }
- else if (!netdev_mc_empty(dev)) {
- IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
+ } else if (!netdev_mc_empty(dev)) {
+ IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
/* Walk the address list, and load the filter */
/* hardware_set_filter(dev->mc_list); */
irlan_set_multicast_filter(self, TRUE);
- }
- else {
- IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ );
+ } else {
+ IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__);
irlan_set_multicast_filter(self, FALSE);
}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 4fe76ff214c2..cd5b8ec9be04 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -621,7 +621,7 @@ static void iucv_disable(void)
put_online_cpus();
}
-static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
+static int iucv_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
cpumask_t cpumask;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index a7ab323849b6..8056fb4e618a 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -113,7 +113,6 @@
#define FRAC_BITS 30 /* fixed point arithmetic */
#define ONE_FP (1UL << FRAC_BITS)
-#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
#define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
#define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */
@@ -189,6 +188,7 @@ struct qfq_sched {
struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
u32 num_active_agg; /* Num. of active aggregates */
u32 wsum; /* weight sum */
+ u32 iwsum; /* inverse weight sum */
unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
@@ -314,6 +314,7 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
q->wsum +=
(int) agg->class_weight * (new_num_classes - agg->num_classes);
+ q->iwsum = ONE_FP / q->wsum;
agg->num_classes = new_num_classes;
}
@@ -340,6 +341,10 @@ static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
if (!hlist_unhashed(&agg->nonfull_next))
hlist_del_init(&agg->nonfull_next);
+ q->wsum -= agg->class_weight;
+ if (q->wsum != 0)
+ q->iwsum = ONE_FP / q->wsum;
+
if (q->in_serv_agg == agg)
q->in_serv_agg = qfq_choose_next_agg(q);
kfree(agg);
@@ -834,38 +839,60 @@ static void qfq_make_eligible(struct qfq_sched *q)
}
}
-
/*
- * The index of the slot in which the aggregate is to be inserted must
- * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
- * because the start time of the group may be moved backward by one
- * slot after the aggregate has been inserted, and this would cause
- * non-empty slots to be right-shifted by one position.
+ * The index of the slot in which the input aggregate agg is to be
+ * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
+ * and not a '-1' because the start time of the group may be moved
+ * backward by one slot after the aggregate has been inserted, and
+ * this would cause non-empty slots to be right-shifted by one
+ * position.
+ *
+ * QFQ+ fully satisfies this bound to the slot index if the parameters
+ * of the classes are not changed dynamically, and if QFQ+ never
+ * happens to postpone the service of agg unjustly, i.e., it never
+ * happens that the aggregate becomes backlogged and eligible, or just
+ * eligible, while an aggregate with a higher approximated finish time
+ * is being served. In particular, in this case QFQ+ guarantees that
+ * the timestamps of agg are low enough that the slot index is never
+ * higher than 2. Unfortunately, QFQ+ cannot provide the same
+ * guarantee if it happens to unjustly postpone the service of agg, or
+ * if the parameters of some class are changed.
+ *
+ * As for the first event, i.e., an out-of-order service, the
+ * upper bound to the slot index guaranteed by QFQ+ grows to
+ * 2 +
+ * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
+ * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
*
- * If the weight and lmax (max_pkt_size) of the classes do not change,
- * then QFQ+ does meet the above contraint according to the current
- * values of its parameters. In fact, if the weight and lmax of the
- * classes do not change, then, from the theory, QFQ+ guarantees that
- * the slot index is never higher than
- * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
- * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
+ * The following function deals with this problem by backward-shifting
+ * the timestamps of agg, if needed, so as to guarantee that the slot
+ * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
+ * cause the service of other aggregates to be postponed, yet the
+ * worst-case guarantees of these aggregates are not violated. In
+ * fact, in case of no out-of-order service, the timestamps of agg
+ * would have been even lower than they are after the backward shift,
+ * because QFQ+ would have guaranteed a maximum value equal to 2 for
+ * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
+ * service is postponed because of the backward-shift would have
+ * however waited for the service of agg before being served.
*
- * When the weight of a class is increased or the lmax of the class is
- * decreased, a new aggregate with smaller slot size than the original
- * parent aggregate of the class may happen to be activated. The
- * activation of this aggregate should be properly delayed to when the
- * service of the class has finished in the ideal system tracked by
- * QFQ+. If the activation of the aggregate is not delayed to this
- * reference time instant, then this aggregate may be unjustly served
- * before other aggregates waiting for service. This may cause the
- * above bound to the slot index to be violated for some of these
- * unlucky aggregates.
+ * The other event that may cause the slot index to be higher than 2
+ * for agg is a recent change of the parameters of some class. If the
+ * weight of a class is increased or the lmax (max_pkt_size) of the
+ * class is decreased, then a new aggregate with smaller slot size
+ * than the original parent aggregate of the class may happen to be
+ * activated. The activation of this aggregate should be properly
+ * delayed to when the service of the class has finished in the ideal
+ * system tracked by QFQ+. If the activation of the aggregate is not
+ * delayed to this reference time instant, then this aggregate may be
+ * unjustly served before other aggregates waiting for service. This
+ * may cause the above bound to the slot index to be violated for some
+ * of these unlucky aggregates.
*
* Instead of delaying the activation of the new aggregate, which is
- * quite complex, the following inaccurate but simple solution is used:
- * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
- * timestamps of the aggregate are shifted backward so as to let the
- * slot index become equal to QFQ_MAX_SLOTS-2.
+ * quite complex, the above-discussed capping of the slot index is
+ * used to handle also the consequences of a change of the parameters
+ * of a class.
*/
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
u64 roundedS)
@@ -1136,7 +1163,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
else
in_serv_agg->budget -= len;
- q->V += (u64)len * IWSUM;
+ q->V += (u64)len * q->iwsum;
pr_debug("qfq dequeue: len %u F %lld now %lld\n",
len, (unsigned long long) in_serv_agg->F,
(unsigned long long) q->V);
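[Editor's note: the removed IWSUM was a compile-time constant, ONE_FP/QFQ_MAX_WSUM, i.e. the inverse of the worst-case weight sum; whenever the actual weight sum was smaller, the virtual time V advanced too slowly, adding packet delay and jitter. Caching ONE_FP / wsum in q->iwsum keeps the per-packet update division-free while tracking the true rate; the division happens only when the weight sum changes. A runnable plain-C mirror of the bookkeeping, not the scheduler itself:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 30
#define ONE_FP (1UL << FRAC_BITS)

struct sched {
	uint32_t wsum, iwsum;	/* weight sum and its fixed-point inverse */
	uint64_t V;		/* virtual time, fixed point */
};

static void weight_sum_changed(struct sched *q, int delta)
{
	q->wsum += delta;
	if (q->wsum)		/* recomputed only here, not per packet */
		q->iwsum = ONE_FP / q->wsum;
}

static void account_dequeue(struct sched *q, uint32_t len)
{
	q->V += (uint64_t)len * q->iwsum;	/* V advances by len / wsum */
}

int main(void)
{
	struct sched q = { 0, 0, 0 };

	weight_sum_changed(&q, 10);
	account_dequeue(&q, 1500);
	printf("V=%llu\n", (unsigned long long)q.V);
	return 0;
}
]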
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 49eb37010aa3..a72de074172d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1823,19 +1823,11 @@ int sunrpc_cache_register_pipefs(struct dentry *parent,
const char *name, umode_t umode,
struct cache_detail *cd)
{
- struct qstr q;
- struct dentry *dir;
- int ret = 0;
-
- q.name = name;
- q.len = strlen(name);
- q.hash = full_name_hash(q.name, q.len);
- dir = rpc_create_cache_dir(parent, &q, umode, cd);
- if (!IS_ERR(dir))
- cd->u.pipefs.dir = dir;
- else
- ret = PTR_ERR(dir);
- return ret;
+ struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ cd->u.pipefs.dir = dir;
+ return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
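[Editor's note: the rewrite is the standard ERR_PTR idiom: a pointer-returning function encodes errno in the pointer itself, the caller tests IS_ERR() and extracts the code with PTR_ERR(), which removes the ret/dir juggling of the old version. A userspace sketch of the encoding (the macros mirror the kernel definitions, but here the "reserved top page" is only simulated):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *make_dir(int fail)
{
	static int dir;
	return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&dir;
}

int main(void)
{
	void *dir = make_dir(1);

	if (IS_ERR(dir))	/* one test replaces the ret/dir juggling */
		printf("error %ld\n", PTR_ERR(dir));
	return 0;
}
]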
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index aa401560777b..9963584605c0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -128,9 +128,7 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
{
static uint32_t clntid;
char name[15];
- struct qstr q = { .name = name };
struct dentry *dir, *dentry;
- int error;
dir = rpc_d_lookup_sb(sb, dir_name);
if (dir == NULL) {
@@ -138,19 +136,17 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
return dir;
}
for (;;) {
- q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
+ snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
name[sizeof(name) - 1] = '\0';
- q.hash = full_name_hash(q.name, q.len);
- dentry = rpc_create_client_dir(dir, &q, clnt);
+ dentry = rpc_create_client_dir(dir, name, clnt);
if (!IS_ERR(dentry))
break;
- error = PTR_ERR(dentry);
- if (error != -EEXIST) {
- printk(KERN_INFO "RPC: Couldn't create pipefs entry"
- " %s/%s, error %d\n",
- dir_name, name, error);
- break;
- }
+ if (dentry == ERR_PTR(-EEXIST))
+ continue;
+ printk(KERN_INFO "RPC: Couldn't create pipefs entry"
+ " %s/%s, error %ld\n",
+ dir_name, name, PTR_ERR(dentry));
+ break;
}
dput(dir);
return dentry;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 61239a2cb786..406859cc68aa 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -673,13 +673,12 @@ static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
}
static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
- struct qstr *name)
+ const char *name)
{
- struct dentry *dentry;
-
- dentry = d_lookup(parent, name);
+ struct qstr q = QSTR_INIT(name, strlen(name));
+ struct dentry *dentry = d_hash_and_lookup(parent, &q);
if (!dentry) {
- dentry = d_alloc(parent, name);
+ dentry = d_alloc(parent, &q);
if (!dentry)
return ERR_PTR(-ENOMEM);
}
@@ -704,8 +703,7 @@ static void __rpc_depopulate(struct dentry *parent,
for (i = start; i < eof; i++) {
name.name = files[i].name;
name.len = strlen(files[i].name);
- name.hash = full_name_hash(name.name, name.len);
- dentry = d_lookup(parent, &name);
+ dentry = d_hash_and_lookup(parent, &name);
if (dentry == NULL)
continue;
@@ -747,12 +745,7 @@ static int rpc_populate(struct dentry *parent,
mutex_lock(&dir->i_mutex);
for (i = start; i < eof; i++) {
- struct qstr q;
-
- q.name = files[i].name;
- q.len = strlen(files[i].name);
- q.hash = full_name_hash(q.name, q.len);
- dentry = __rpc_lookup_create_exclusive(parent, &q);
+ dentry = __rpc_lookup_create_exclusive(parent, files[i].name);
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_bad;
@@ -785,7 +778,7 @@ out_bad:
}
static struct dentry *rpc_mkdir_populate(struct dentry *parent,
- struct qstr *name, umode_t mode, void *private,
+ const char *name, umode_t mode, void *private,
int (*populate)(struct dentry *, void *), void *args_populate)
{
struct dentry *dentry;
@@ -856,7 +849,6 @@ struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
struct dentry *dentry;
struct inode *dir = parent->d_inode;
umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
- struct qstr q;
int err;
if (pipe->ops->upcall == NULL)
@@ -864,12 +856,8 @@ struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
if (pipe->ops->downcall == NULL)
umode &= ~S_IWUGO;
- q.name = name;
- q.len = strlen(name);
- q.hash = full_name_hash(q.name, q.len),
-
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
- dentry = __rpc_lookup_create_exclusive(parent, &q);
+ dentry = __rpc_lookup_create_exclusive(parent, name);
if (IS_ERR(dentry))
goto out;
err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops,
@@ -940,8 +928,8 @@ static void rpc_clntdir_depopulate(struct dentry *dentry)
/**
* rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
- * @dentry: dentry from the rpc_pipefs root to the new directory
- * @name: &struct qstr for the name
+ * @dentry: the parent of new directory
+ * @name: the name of new directory
* @rpc_client: rpc client to associate with this directory
*
* This creates a directory at the given @path associated with
@@ -950,7 +938,7 @@ static void rpc_clntdir_depopulate(struct dentry *dentry)
* later be created using rpc_mkpipe().
*/
struct dentry *rpc_create_client_dir(struct dentry *dentry,
- struct qstr *name,
+ const char *name,
struct rpc_clnt *rpc_client)
{
return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
@@ -996,7 +984,7 @@ static void rpc_cachedir_depopulate(struct dentry *dentry)
rpc_depopulate(dentry, cache_pipefs_files, 0, 3);
}
-struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
+struct dentry *rpc_create_cache_dir(struct dentry *parent, const char *name,
umode_t umode, struct cache_detail *cd)
{
return rpc_mkdir_populate(parent, name, umode, NULL,
@@ -1076,9 +1064,7 @@ struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
const unsigned char *dir_name)
{
struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
-
- dir.hash = full_name_hash(dir.name, dir.len);
- return d_lookup(sb->s_root, &dir);
+ return d_hash_and_lookup(sb->s_root, &dir);
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 8d2edddf48cf..65b146297f5a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -98,6 +98,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
*/
static u32 *decode_write_list(u32 *va, u32 *vaend)
{
+ unsigned long start, end;
int nchunks;
struct rpcrdma_write_array *ary =
@@ -113,9 +114,12 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
return NULL;
}
nchunks = ntohl(ary->wc_nchunks);
- if (((unsigned long)&ary->wc_array[0] +
- (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
- (unsigned long)vaend) {
+
+ start = (unsigned long)&ary->wc_array[0];
+ end = (unsigned long)vaend;
+ if (nchunks < 0 ||
+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
ary, nchunks, vaend);
return NULL;
@@ -129,6 +133,7 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
static u32 *decode_reply_array(u32 *va, u32 *vaend)
{
+ unsigned long start, end;
int nchunks;
struct rpcrdma_write_array *ary =
(struct rpcrdma_write_array *)va;
@@ -143,9 +148,12 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
return NULL;
}
nchunks = ntohl(ary->wc_nchunks);
- if (((unsigned long)&ary->wc_array[0] +
- (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
- (unsigned long)vaend) {
+
+ start = (unsigned long)&ary->wc_array[0];
+ end = (unsigned long)vaend;
+ if (nchunks < 0 ||
+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
ary, nchunks, vaend);
return NULL;
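[Editor's note: the old test, start + n*size > end, is unsafe when n comes off the wire: wc_nchunks is read as a signed int, and a huge or negative value wraps the multiply-add so the check passes and the subsequent walk runs off the buffer. The new test first rejects negative counts, then bounds n by (SIZE_MAX - start)/size so the arithmetic cannot wrap. A runnable userspace sketch demonstrating the failure and the fix (assumption: 64-bit, 16-byte chunks like struct rpcrdma_write_chunk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SZ 16UL

static bool naive_ok(unsigned long start, unsigned long end, long n)
{
	return start + CHUNK_SZ * n <= end;	/* can wrap around! */
}

static bool safe_ok(unsigned long start, unsigned long end, long n)
{
	if (n < 0)
		return false;
	if ((unsigned long)n > (SIZE_MAX - start) / CHUNK_SZ)
		return false;			/* multiply-add would wrap */
	return start + CHUNK_SZ * n <= end;
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x2000;
	long evil = SIZE_MAX / CHUNK_SZ;	/* wraps the naive check */

	printf("naive: %d safe: %d\n", naive_ok(start, end, evil),
	       safe_ok(start, end, evil));	/* naive: 1 safe: 0 */
	return 0;
}
]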