author     David S. Miller <davem@davemloft.net>  2017-11-15 08:14:17 +0300
committer  David S. Miller <davem@davemloft.net>  2017-11-15 08:14:17 +0300
commit     69d481791f38f692707254406945d35591d12f40
tree       67e5e1fab1969ec245fea1b3a232dc16b89c9f32
parent     6670e152447732ba90626f36dfc015a13fbf150e
parent     9b0ed89172efec1d9f214d173ad6046f10f6b742
Merge branch 'netem-fix-compilation-on-32-bit'
Stephen Hemminger says:
====================
netem: fix compilation on 32 bit
A couple of places where a 64-bit CPU was being assumed incorrectly.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
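For context on the divide half of the fix: on a 32-bit kernel a plain `/` on two 64-bit operands does not compile to native code; gcc emits a call to a libgcc helper (e.g. __udivdi3) that the kernel does not link against, so 64-by-64 division has to go through div64_u64() (or do_div() when the divisor fits in 32 bits). The sketch below is a hedged userspace re-implementation of the rate-to-delay math that packet_time_ns() performs after this series, not the kernel code itself; the struct layout, names, and the values in main() are illustrative only.

```c
/*
 * Userspace sketch of netem's per-packet transmission-time computation:
 * time = len * NSEC_PER_SEC / rate, with optional cell quantization.
 * In the kernel this final division is div64_u64(len * NSEC_PER_SEC, q->rate),
 * which is what keeps the code linking on 32-bit architectures.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct rate_cfg {                 /* illustrative stand-in for netem_sched_data */
	uint64_t rate;            /* bytes per second */
	uint32_t packet_overhead;
	uint32_t cell_size;
	uint32_t cell_overhead;
};

static uint64_t packet_time_ns(uint64_t len, const struct rate_cfg *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		/* round the payload up to a whole number of cells */
		uint64_t cells = len / q->cell_size;

		if (cells * q->cell_size < len)
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	/* kernel code: return div64_u64(len * NSEC_PER_SEC, q->rate); */
	return len * NSEC_PER_SEC / q->rate;
}

int main(void)
{
	struct rate_cfg q = { .rate = 125000 /* 1 Mbit/s in bytes/s */ };

	/* a 1500-byte packet at 1 Mbit/s occupies the wire for 12 ms */
	printf("%llu ns\n", (unsigned long long)packet_time_ns(1500, &q));
	return 0;
}
```

Compiled with any C compiler, the example prints 12000000 ns, i.e. the 12 ms serialization delay of a 1500-byte packet at 1 Mbit/s.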
 net/sched/sch_netem.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b686e755fda9..dd70924cbcdf 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -312,9 +312,9 @@ static bool loss_event(struct netem_sched_data *q)
  * std deviation sigma. Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
  */
-static s64 tabledist(s64 mu, s64 sigma,
+static s64 tabledist(s64 mu, s32 sigma,
 		     struct crndstate *state,
-			const struct disttable *dist)
+		     const struct disttable *dist)
 {
 	s64 x;
 	long t;
@@ -327,7 +327,7 @@ static s64 tabledist(s64 mu, s64 sigma,
 
 	/* default uniform distribution */
 	if (dist == NULL)
-		return (rnd % (2*sigma)) - sigma + mu;
+		return (rnd % (2 * sigma)) - sigma + mu;
 
 	t = dist->table[rnd % dist->size];
 	x = (sigma % NETEM_DIST_SCALE) * t;
@@ -339,10 +339,8 @@ static s64 tabledist(s64 mu, s64 sigma,
 	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-static u64 packet_len_2_sched_time(unsigned int len,
-				   struct netem_sched_data *q)
+static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
 {
-	u64 offset;
 	len += q->packet_overhead;
 
 	if (q->cell_size) {
@@ -352,9 +350,8 @@ static u64 packet_len_2_sched_time(unsigned int len,
 			cells++;
 		len = cells * (q->cell_size + q->cell_overhead);
 	}
-	offset = (u64)len * NSEC_PER_SEC;
-	do_div(offset, q->rate);
-	return offset;
+
+	return div64_u64(len * NSEC_PER_SEC, q->rate);
 }
 
 static void tfifo_reset(struct Qdisc *sch)
@@ -556,7 +553,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			now = last->time_to_send;
 		}
 
-		delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
+		delay += packet_time_ns(qdisc_pkt_len(skb), q);
 	}
 
 	cb->time_to_send = now + delay;
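The other half of the fix is the modulus in tabledist(): narrowing sigma to s32 keeps the `%` in the uniform-distribution fallback a 32-bit operation, so no 64-bit modulus helper (e.g. __umoddi3) is pulled in on 32-bit builds. Below is a minimal userspace sketch of that fallback path, modelled on the "dist == NULL" branch; the function name, parameter values, and the explicit cast are my own additions for clarity, not the kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Uniform-jitter fallback, modelled on tabledist()'s dist == NULL branch:
 * pick a value in [mu - sigma, mu + sigma) from a 32-bit random sample.
 * Because sigma is 32-bit, the '%' below is a plain 32-bit modulus and
 * needs no libgcc 64-bit helper on 32-bit machines.
 */
static int64_t uniform_jitter(int64_t mu, int32_t sigma, uint32_t rnd)
{
	if (sigma <= 0)
		return mu;
	/* cast the 32-bit modulus result before subtracting so it can go negative */
	return mu + (int64_t)(rnd % (uint32_t)(2 * sigma)) - sigma;
}

int main(void)
{
	/* e.g. a 100 us mean delay with +/- 10 us jitter, in nanoseconds */
	uint32_t samples[] = { 0, 5000, 10000, 15000, 19999 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%lld ns\n",
		       (long long)uniform_jitter(100000, 10000, samples[i]));
	return 0;
}
```

With the sample values above the output ranges from 90000 ns to 109999 ns, i.e. the mean plus or minus the requested jitter.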