From 74d23cc704d19732e70ef1579a669f7d5f09dd9a Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sun, 21 Dec 2014 19:46:56 +0100 Subject: time: move the timecounter/cyclecounter code into its own file. The timecounter code has almost nothing to do with the clocksource code. Let it live in its own file. This will help isolate the timecounter users from the clocksource users in the source tree. Signed-off-by: Richard Cochran Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- include/linux/clocksource.h | 102 ------------------------------------ include/linux/mlx4/device.h | 2 +- include/linux/timecounter.h | 122 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/types.h | 3 ++ 4 files changed, 126 insertions(+), 103 deletions(-) create mode 100644 include/linux/timecounter.h (limited to 'include/linux') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index abcafaa20b86..9c78d15d33e4 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -18,8 +18,6 @@ #include #include -/* clocksource cycle base type */ -typedef u64 cycle_t; struct clocksource; struct module; @@ -27,106 +25,6 @@ struct module; #include #endif -/** - * struct cyclecounter - hardware abstraction for a free running counter - * Provides completely state-free accessors to the underlying hardware. - * Depending on which hardware it reads, the cycle counter may wrap - * around quickly. Locking rules (if necessary) have to be defined - * by the implementor and user of specific instances of this API. - * - * @read: returns the current cycle value - * @mask: bitmask for two's complement - * subtraction of non 64 bit counters, - * see CLOCKSOURCE_MASK() helper macro - * @mult: cycle to nanosecond multiplier - * @shift: cycle to nanosecond divisor (power of two) - */ -struct cyclecounter { - cycle_t (*read)(const struct cyclecounter *cc); - cycle_t mask; - u32 mult; - u32 shift; -}; - -/** - * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds - * Contains the state needed by timecounter_read() to detect - * cycle counter wrap around. Initialize with - * timecounter_init(). Also used to convert cycle counts into the - * corresponding nanosecond counts with timecounter_cyc2time(). Users - * of this code are responsible for initializing the underlying - * cycle counter hardware, locking issues and reading the time - * more often than the cycle counter wraps around. The nanosecond - * counter will only wrap around after ~585 years. - * - * @cc: the cycle counter used by this instance - * @cycle_last: most recent cycle counter value seen by - * timecounter_read() - * @nsec: continuously increasing count - */ -struct timecounter { - const struct cyclecounter *cc; - cycle_t cycle_last; - u64 nsec; -}; - -/** - * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds - * @cc: Pointer to cycle counter. - * @cycles: Cycles - * - * XXX - This could use some mult_lxl_ll() asm optimization. Same code - * as in cyc2ns, but with unsigned result. - */ -static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, - cycle_t cycles) -{ - u64 ret = (u64)cycles; - ret = (ret * cc->mult) >> cc->shift; - return ret; -} - -/** - * timecounter_init - initialize a time counter - * @tc: Pointer to time counter which is to be initialized/reset - * @cc: A cycle counter, ready to be used. - * @start_tstamp: Arbitrary initial time stamp. - * - * After this call the current cycle register (roughly) corresponds to - * the initial time stamp. 
Every call to timecounter_read() increments - * the time stamp counter by the number of elapsed nanoseconds. - */ -extern void timecounter_init(struct timecounter *tc, - const struct cyclecounter *cc, - u64 start_tstamp); - -/** - * timecounter_read - return nanoseconds elapsed since timecounter_init() - * plus the initial time stamp - * @tc: Pointer to time counter. - * - * In other words, keeps track of time since the same epoch as - * the function which generated the initial time stamp. - */ -extern u64 timecounter_read(struct timecounter *tc); - -/** - * timecounter_cyc2time - convert a cycle counter to same - * time base as values returned by - * timecounter_read() - * @tc: Pointer to time counter. - * @cycle_tstamp: a value returned by tc->cc->read() - * - * Cycle counts that are converted correctly as long as they - * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], - * with "max cycle count" == cs->mask+1. - * - * This allows conversion of cycle counter values which were generated - * in the past. - */ -extern u64 timecounter_cyc2time(struct timecounter *tc, - cycle_t cycle_tstamp); - /** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 25c791e295fd..f1e41b33462f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -42,7 +42,7 @@ #include -#include +#include #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h new file mode 100644 index 000000000000..146f07a6651b --- /dev/null +++ b/include/linux/timecounter.h @@ -0,0 +1,122 @@ +/* + * linux/include/linux/timecounter.h + * + * based on code that migrated away from + * linux/include/linux/clocksource.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _LINUX_TIMECOUNTER_H +#define _LINUX_TIMECOUNTER_H + +#include + +/** + * struct cyclecounter - hardware abstraction for a free running counter + * Provides completely state-free accessors to the underlying hardware. + * Depending on which hardware it reads, the cycle counter may wrap + * around quickly. Locking rules (if necessary) have to be defined + * by the implementor and user of specific instances of this API. + * + * @read: returns the current cycle value + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters, + * see CLOCKSOURCE_MASK() helper macro + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + */ +struct cyclecounter { + cycle_t (*read)(const struct cyclecounter *cc); + cycle_t mask; + u32 mult; + u32 shift; +}; + +/** + * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds + * Contains the state needed by timecounter_read() to detect + * cycle counter wrap around. Initialize with + * timecounter_init(). Also used to convert cycle counts into the + * corresponding nanosecond counts with timecounter_cyc2time(). 
Users + * of this code are responsible for initializing the underlying + * cycle counter hardware, locking issues and reading the time + * more often than the cycle counter wraps around. The nanosecond + * counter will only wrap around after ~585 years. + * + * @cc: the cycle counter used by this instance + * @cycle_last: most recent cycle counter value seen by + * timecounter_read() + * @nsec: continuously increasing count + */ +struct timecounter { + const struct cyclecounter *cc; + cycle_t cycle_last; + u64 nsec; +}; + +/** + * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds + * @cc: Pointer to cycle counter. + * @cycles: Cycles + * + * XXX - This could use some mult_lxl_ll() asm optimization. Same code + * as in cyc2ns, but with unsigned result. + */ +static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, + cycle_t cycles) +{ + u64 ret = (u64)cycles; + ret = (ret * cc->mult) >> cc->shift; + return ret; +} + +/** + * timecounter_init - initialize a time counter + * @tc: Pointer to time counter which is to be initialized/reset + * @cc: A cycle counter, ready to be used. + * @start_tstamp: Arbitrary initial time stamp. + * + * After this call the current cycle register (roughly) corresponds to + * the initial time stamp. Every call to timecounter_read() increments + * the time stamp counter by the number of elapsed nanoseconds. + */ +extern void timecounter_init(struct timecounter *tc, + const struct cyclecounter *cc, + u64 start_tstamp); + +/** + * timecounter_read - return nanoseconds elapsed since timecounter_init() + * plus the initial time stamp + * @tc: Pointer to time counter. + * + * In other words, keeps track of time since the same epoch as + * the function which generated the initial time stamp. + */ +extern u64 timecounter_read(struct timecounter *tc); + +/** + * timecounter_cyc2time - convert a cycle counter to same + * time base as values returned by + * timecounter_read() + * @tc: Pointer to time counter. + * @cycle_tstamp: a value returned by tc->cc->read() + * + * Cycle counts that are converted correctly as long as they + * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], + * with "max cycle count" == cs->mask+1. + * + * This allows conversion of cycle counter values which were generated + * in the past. + */ +extern u64 timecounter_cyc2time(struct timecounter *tc, + cycle_t cycle_tstamp); + +#endif diff --git a/include/linux/types.h b/include/linux/types.h index a0bb7048687f..62323825cff9 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -213,5 +213,8 @@ struct callback_head { }; #define rcu_head callback_head +/* clocksource cycle base type */ +typedef u64 cycle_t; + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ -- cgit v1.2.3 From 796c1efd6fa0ed696d550b68f4410ab1a1749d01 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sun, 21 Dec 2014 19:46:57 +0100 Subject: timecounter: provide a helper function to shift the time. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some PTP Hardware Clock drivers use a struct timecounter to represent their clock. To adjust the time by a given offset, these drivers all perform a two step read/write of their timecounter. However, it is better and simpler just to adjust the offset in one step. This patch introduces a little routine to help drivers implement the adjtime method. Suggested-by: Janusz Użycki Signed-off-by: Richard Cochran Signed-off-by: David S. 
Miller --- include/linux/timecounter.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 146f07a6651b..af3dfa4e90f0 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -78,6 +78,15 @@ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, return ret; } +/** + * timecounter_adjtime - Shifts the time of the clock. + * @delta: Desired change in nanoseconds. + */ +static inline void timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + /** * timecounter_init - initialize a time counter * @tc: Pointer to time counter which is to be initialized/reset -- cgit v1.2.3 From 2eebdde6528a722fbf8e2cffcf7aa52cbb4c2de0 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sun, 21 Dec 2014 19:47:06 +0100 Subject: timecounter: keep track of accumulated fractional nanoseconds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current timecounter implementation will drop a variable amount of resolution, depending on the magnitude of the time delta. In other words, reading the clock too often or too close to a time stamp conversion will introduce errors into the time values. This patch fixes the issue by introducing a fractional nanosecond field that accumulates the low order bits. Reported-by: Janusz Użycki Signed-off-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 4 ++-- include/linux/timecounter.h | 19 ++++++++++------ kernel/time/timecounter.c | 31 +++++++++++++++++++++------ virt/kvm/arm/arch_timer.c | 3 ++- 4 files changed, 40 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index df35d0e1b899..e9cce4f72b24 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -240,7 +240,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) { struct mlx4_dev *dev = mdev->dev; unsigned long flags; - u64 ns; + u64 ns, zero = 0; rwlock_init(&mdev->clock_lock); @@ -265,7 +265,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least once every wrap around. */ - ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask); + ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero); do_div(ns, NSEC_PER_SEC / 2 / HZ); mdev->overflow_period = ns; diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index af3dfa4e90f0..74f45496e6d1 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -55,27 +55,32 @@ struct cyclecounter { * @cycle_last: most recent cycle counter value seen by * timecounter_read() * @nsec: continuously increasing count + * @mask: bit mask for maintaining the 'frac' field + * @frac: accumulated fractional nanoseconds */ struct timecounter { const struct cyclecounter *cc; cycle_t cycle_last; u64 nsec; + u64 mask; + u64 frac; }; /** * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds * @cc: Pointer to cycle counter. * @cycles: Cycles - * - * XXX - This could use some mult_lxl_ll() asm optimization. Same code - * as in cyc2ns, but with unsigned result. + * @mask: bit mask for maintaining the 'frac' field + * @frac: pointer to storage for the fractional nanoseconds. 
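+ *
+ * Note: timecounter_init() sets up the timecounter's mask as
+ * (1 << cc->shift) - 1, so the bits of the scaled product below one
+ * nanosecond are carried over in *frac from one conversion to the
+ * next instead of being truncated on every call.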
*/ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, - cycle_t cycles) + cycle_t cycles, u64 mask, u64 *frac) { - u64 ret = (u64)cycles; - ret = (ret * cc->mult) >> cc->shift; - return ret; + u64 ns = (u64) cycles; + + ns = (ns * cc->mult) + *frac; + *frac = ns & mask; + return ns >> cc->shift; } /** diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c index 59a1ec3a57cb..4687b3104bae 100644 --- a/kernel/time/timecounter.c +++ b/kernel/time/timecounter.c @@ -25,6 +25,8 @@ void timecounter_init(struct timecounter *tc, tc->cc = cc; tc->cycle_last = cc->read(cc); tc->nsec = start_tstamp; + tc->mask = (1ULL << cc->shift) - 1; + tc->frac = 0; } EXPORT_SYMBOL_GPL(timecounter_init); @@ -51,7 +53,8 @@ static u64 timecounter_read_delta(struct timecounter *tc) cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask; /* convert to nanoseconds: */ - ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta); + ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta, + tc->mask, &tc->frac); /* update time stamp of timecounter_read_delta() call: */ tc->cycle_last = cycle_now; @@ -72,22 +75,36 @@ u64 timecounter_read(struct timecounter *tc) } EXPORT_SYMBOL_GPL(timecounter_read); +/* + * This is like cyclecounter_cyc2ns(), but it is used for computing a + * time previous to the time stored in the cycle counter. + */ +static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, + cycle_t cycles, u64 mask, u64 frac) +{ + u64 ns = (u64) cycles; + + ns = ((ns * cc->mult) - frac) >> cc->shift; + + return ns; +} + u64 timecounter_cyc2time(struct timecounter *tc, cycle_t cycle_tstamp) { - u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; - u64 nsec; + u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; + u64 nsec = tc->nsec, frac = tc->frac; /* * Instead of always treating cycle_tstamp as more recent * than tc->cycle_last, detect when it is too far in the * future and treat it as old time stamp instead. */ - if (cycle_delta > tc->cc->mask / 2) { - cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; - nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta); + if (delta > tc->cc->mask / 2) { + delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; + nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac); } else { - nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec; + nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac); } return nsec; diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 1c0772b340d8..6e54f3542126 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -152,7 +152,8 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) return; } - ns = cyclecounter_cyc2ns(timecounter->cc, cval - now); + ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask, + &timecounter->frac); timer_arm(timer, ns); } -- cgit v1.2.3 From dd450777990baae668c1143064f2f234dbab1b9b Mon Sep 17 00:00:00 2001 From: Dmitry Eremin-Solenikov Date: Wed, 24 Dec 2014 01:14:14 +0300 Subject: arm: sa1100: move irda header to linux/platform_data In the end asm/mach/irda.h header is not used by anybody except sa1100. Move the header to the platform data includes dir and rename it to irda-sa11x0.h. Signed-off-by: Dmitry Eremin-Solenikov Signed-off-by: David S. 
Miller --- arch/arm/include/asm/mach/irda.h | 20 -------------------- arch/arm/mach-sa1100/assabet.c | 2 +- arch/arm/mach-sa1100/collie.c | 2 +- arch/arm/mach-sa1100/h3100.c | 2 +- arch/arm/mach-sa1100/h3600.c | 2 +- drivers/net/irda/sa1100_ir.c | 2 +- include/linux/platform_data/irda-sa11x0.h | 20 ++++++++++++++++++++ 7 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 arch/arm/include/asm/mach/irda.h create mode 100644 include/linux/platform_data/irda-sa11x0.h (limited to 'include/linux') diff --git a/arch/arm/include/asm/mach/irda.h b/arch/arm/include/asm/mach/irda.h deleted file mode 100644 index 38f77b5e56cf..000000000000 --- a/arch/arm/include/asm/mach/irda.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * arch/arm/include/asm/mach/irda.h - * - * Copyright (C) 2004 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_ARM_MACH_IRDA_H -#define __ASM_ARM_MACH_IRDA_H - -struct irda_platform_data { - int (*startup)(struct device *); - void (*shutdown)(struct device *); - int (*set_power)(struct device *, unsigned int state); - void (*set_speed)(struct device *, unsigned int speed); -}; - -#endif diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 7dd894ece9ae..d28ecb9ef172 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -37,7 +37,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index b90c7d828391..7fcbe3d119c7 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c index 3c43219bc881..c6b412054a3c 100644 --- a/arch/arm/mach-sa1100/h3100.c +++ b/arch/arm/mach-sa1100/h3100.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c index 5be54c214c7c..118338efd790 100644 --- a/arch/arm/mach-sa1100/h3600.c +++ b/arch/arm/mach-sa1100/h3600.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 7b17fa2114e1..b6e44ff4e373 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -38,7 +38,7 @@ #include #include -#include +#include static int power_level = 3; static int tx_lpm; diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h new file mode 100644 index 000000000000..38f77b5e56cf --- /dev/null +++ b/include/linux/platform_data/irda-sa11x0.h @@ -0,0 +1,20 @@ +/* + * arch/arm/include/asm/mach/irda.h + * + * Copyright (C) 2004 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __ASM_ARM_MACH_IRDA_H +#define __ASM_ARM_MACH_IRDA_H + +struct irda_platform_data { + int (*startup)(struct device *); + void (*shutdown)(struct device *); + int (*set_power)(struct device *, unsigned int state); + void (*set_speed)(struct device *, unsigned int speed); +}; + +#endif -- cgit v1.2.3 From de40ed31b3c577cefd7b54972365a272ecbe9dd6 Mon Sep 17 00:00:00 2001 From: Nimrod Andy Date: Wed, 24 Dec 2014 17:30:39 +0800 Subject: net: fec: add Wake-on-LAN support Support for Wake-on-LAN using Magic Packet. ENET IP supports sleep mode in low power status, when system enter suspend status, Magic packet can wake up system even if all SOC clocks are gate. The patch doing below things: - flagging the device as a wakeup source for the system, as well as its Wake-on-LAN interrupt - prepare the hardware for entering WoL mode - add standard ethtool WOL interface - enable the ENET interrupt to wake us Tested on i.MX6q/dl sabresd, sabreauto boards, i.MX6SX arm2 boards. Signed-off-by: Fugang Duan Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/fsl-fec.txt | 2 + drivers/net/ethernet/freescale/fec.h | 2 + drivers/net/ethernet/freescale/fec_main.c | 104 +++++++++++++++++++--- include/linux/fec.h | 1 + 4 files changed, 99 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index 0c8775c45798..a9eb611bee68 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -22,6 +22,8 @@ Optional properties: - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports hw multi queues. Should specify the rx queue number, otherwise set rx queue number to 1. +- fsl,magic-packet : If present, indicates that the hardware supports waking + up via magic packet. 
Optional subnodes: - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index df8bbddaeb37..d77a96fdf1dd 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -357,6 +357,7 @@ struct bufdesc_ex { #define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) #define FEC_ENET_TS_AVAIL ((uint)0x00010000) @@ -512,6 +513,7 @@ struct fec_enet_private { int irq[FEC_IRQ_NUM]; bool bufdesc_ex; int pause_flag; + int wol_flag; u32 quirks; struct napi_struct napi; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5ebdf8dc8a31..49cd358c30fa 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -187,6 +187,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) +/* FEC ECR bits definition */ +#define FEC_ECR_MAGICEN (1 << 2) +#define FEC_ECR_SLEEP (1 << 3) #define FEC_MII_TIMEOUT 30000 /* us */ @@ -195,6 +198,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_PAUSE_FLAG_AUTONEG 0x1 #define FEC_PAUSE_FLAG_ENABLE 0x2 +#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) +#define FEC_WOL_FLAG_ENABLE (0x1 << 1) +#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) #define COPYBREAK_DEFAULT 256 @@ -1089,7 +1095,9 @@ static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); + u32 val; /* We cannot expect a graceful transmit stop without link !!! */ if (fep->link) { @@ -1103,17 +1111,28 @@ fec_stop(struct net_device *ndev) * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. 
*/ - if (fep->quirks & FEC_QUIRK_HAS_AVB) { - writel(0, fep->hwp + FEC_ECNTRL); + if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } else { - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + + if (pdata && pdata->sleep_mode_enable) + pdata->sleep_mode_enable(true); } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ - if (fep->quirks & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC && + !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } @@ -2427,6 +2446,44 @@ static int fec_enet_set_tunable(struct net_device *netdev, return ret; } +static void +fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; + } else { + wol->supported = wol->wolopts = 0; + } +} + +static int +fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) + return -EINVAL; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); + if (device_may_wakeup(&ndev->dev)) { + fep->wol_flag |= FEC_WOL_FLAG_ENABLE; + if (fep->irq[0] > 0) + enable_irq_wake(fep->irq[0]); + } else { + fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); + if (fep->irq[0] > 0) + disable_irq_wake(fep->irq[0]); + } + + return 0; +} + static const struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, @@ -2445,6 +2502,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_ts_info = fec_enet_get_ts_info, .get_tunable = fec_enet_get_tunable, .set_tunable = fec_enet_set_tunable, + .get_wol = fec_enet_get_wol, + .set_wol = fec_enet_set_wol, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -2705,6 +2764,9 @@ fec_enet_open(struct net_device *ndev) phy_start(fep->phy_dev); netif_tx_start_all_queues(ndev); + device_set_wakeup_enable(&ndev->dev, fep->wol_flag & + FEC_WOL_FLAG_ENABLE); + return 0; err_enet_mii_probe: @@ -3153,6 +3215,9 @@ fec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); + if (of_get_property(np, "fsl,magic-packet", NULL)) + fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; + phy_node = of_parse_phandle(np, "phy-handle", 0); if (!phy_node && of_phy_is_fixed_link(np)) { ret = of_phy_register_fixed_link(np); @@ -3247,6 +3312,8 @@ fec_probe(struct platform_device *pdev) 0, pdev->name, ndev); if (ret) goto failed_irq; + + fep->irq[i] = irq; } init_completion(&fep->mdio_done); @@ -3263,6 +3330,9 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_register; + device_init_wakeup(&ndev->dev, fep->wol_flag & + FEC_WOL_HAS_MAGIC_PACKET); + if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); @@ 
-3316,6 +3386,8 @@ static int __maybe_unused fec_suspend(struct device *dev) rtnl_lock(); if (netif_running(ndev)) { + if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) + fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; phy_stop(fep->phy_dev); napi_disable(&fep->napi); netif_tx_lock_bh(ndev); @@ -3323,11 +3395,12 @@ static int __maybe_unused fec_suspend(struct device *dev) netif_tx_unlock_bh(ndev); fec_stop(ndev); fec_enet_clk_enable(ndev, false); - pinctrl_pm_select_sleep_state(&fep->pdev->dev); + if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + pinctrl_pm_select_sleep_state(&fep->pdev->dev); } rtnl_unlock(); - if (fep->reg_phy) + if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) regulator_disable(fep->reg_phy); /* SOC supply clock to phy, when clock is disabled, phy link down @@ -3343,9 +3416,11 @@ static int __maybe_unused fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; int ret; + int val; - if (fep->reg_phy) { + if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { ret = regulator_enable(fep->reg_phy); if (ret) return ret; @@ -3353,12 +3428,21 @@ static int __maybe_unused fec_resume(struct device *dev) rtnl_lock(); if (netif_running(ndev)) { - pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) { rtnl_unlock(); goto failed_clk; } + if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { + if (pdata && pdata->sleep_mode_enable) + pdata->sleep_mode_enable(false); + val = readl(fep->hwp + FEC_ECNTRL); + val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; + } else { + pinctrl_pm_select_default_state(&fep->pdev->dev); + } fec_restart(ndev); netif_tx_lock_bh(ndev); netif_device_attach(ndev); diff --git a/include/linux/fec.h b/include/linux/fec.h index bcff455d1d53..1454a503622d 100644 --- a/include/linux/fec.h +++ b/include/linux/fec.h @@ -19,6 +19,7 @@ struct fec_platform_data { phy_interface_t phy; unsigned char mac[ETH_ALEN]; + void (*sleep_mode_enable)(int enabled); }; #endif -- cgit v1.2.3 From 9b174d88c257150562b0101fcc6cb6c3cb74275c Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Tue, 30 Dec 2014 19:10:15 -0800 Subject: net: Add Transparent Ethernet Bridging GRO support. Currently the only tunnel protocol that supports GRO with encapsulated Ethernet is VXLAN. This pulls out the Ethernet code into a proper layer so that it can be used by other tunnel protocols such as GRE and Geneve. Signed-off-by: Jesse Gross Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 53 +++----------------------- include/linux/etherdevice.h | 4 ++ net/ethernet/eth.c | 92 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7fbd89fbe107..2ab0922af0b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -549,10 +549,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff { struct sk_buff *p, **pp = NULL; struct vxlanhdr *vh, *vh2; - struct ethhdr *eh, *eh2; - unsigned int hlen, off_vx, off_eth; - const struct packet_offload *ptype; - __be16 type; + unsigned int hlen, off_vx; int flush = 1; off_vx = skb_gro_offset(skb); @@ -563,17 +560,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff if (unlikely(!vh)) goto out; } - skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ - skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); - - off_eth = skb_gro_offset(skb); - hlen = off_eth + sizeof(*eh); - eh = skb_gro_header_fast(skb, off_eth); - if (skb_gro_header_hard(skb, hlen)) { - eh = skb_gro_header_slow(skb, hlen, off_eth); - if (unlikely(!eh)) - goto out; - } flush = 0; @@ -582,28 +568,16 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff continue; vh2 = (struct vxlanhdr *)(p->data + off_vx); - eh2 = (struct ethhdr *)(p->data + off_eth); - if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) { + if (vh->vx_vni != vh2->vx_vni) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } - type = eh->h_proto; - - rcu_read_lock(); - ptype = gro_find_receive_by_type(type); - if (ptype == NULL) { - flush = 1; - goto out_unlock; - } - - skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */ - skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); - pp = ptype->callbacks.gro_receive(head, skb); + skb_gro_pull(skb, sizeof(struct vxlanhdr)); + skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); + pp = eth_gro_receive(head, skb); -out_unlock: - rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; @@ -612,24 +586,9 @@ out: static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) { - struct ethhdr *eh; - struct packet_offload *ptype; - __be16 type; - int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); - int err = -ENOSYS; - udp_tunnel_gro_complete(skb, nhoff); - eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); - type = eh->h_proto; - - rcu_read_lock(); - ptype = gro_find_complete_by_type(type); - if (ptype != NULL) - err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len); - - rcu_read_unlock(); - return err; + return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } /* Notify netdevs that UDP port started listening */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 41c891d05f04..1d869d185a0d 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) +struct sk_buff **eth_gro_receive(struct sk_buff **head, + struct sk_buff *skb); +int eth_gro_complete(struct sk_buff *skb, int nhoff); + /* Reserved Ethernet Addresses per IEEE 802.1Q */ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; diff --git 
a/net/ethernet/eth.c b/net/ethernet/eth.c index 33a140e15834..238f38d21641 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -424,3 +424,95 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr); } EXPORT_SYMBOL(sysfs_format_mac); + +struct sk_buff **eth_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct sk_buff *p, **pp = NULL; + struct ethhdr *eh, *eh2; + unsigned int hlen, off_eth; + const struct packet_offload *ptype; + __be16 type; + int flush = 1; + + off_eth = skb_gro_offset(skb); + hlen = off_eth + sizeof(*eh); + eh = skb_gro_header_fast(skb, off_eth); + if (skb_gro_header_hard(skb, hlen)) { + eh = skb_gro_header_slow(skb, hlen, off_eth); + if (unlikely(!eh)) + goto out; + } + + flush = 0; + + for (p = *head; p; p = p->next) { + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + eh2 = (struct ethhdr *)(p->data + off_eth); + if (compare_ether_header(eh, eh2)) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + } + + type = eh->h_proto; + + rcu_read_lock(); + ptype = gro_find_receive_by_type(type); + if (ptype == NULL) { + flush = 1; + goto out_unlock; + } + + skb_gro_pull(skb, sizeof(*eh)); + skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); + pp = ptype->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} +EXPORT_SYMBOL(eth_gro_receive); + +int eth_gro_complete(struct sk_buff *skb, int nhoff) +{ + struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff); + __be16 type = eh->h_proto; + struct packet_offload *ptype; + int err = -ENOSYS; + + if (skb->encapsulation) + skb_set_inner_mac_header(skb, nhoff); + + rcu_read_lock(); + ptype = gro_find_complete_by_type(type); + if (ptype != NULL) + err = ptype->callbacks.gro_complete(skb, nhoff + + sizeof(struct ethhdr)); + + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL(eth_gro_complete); + +static struct packet_offload eth_packet_offload __read_mostly = { + .type = cpu_to_be16(ETH_P_TEB), + .callbacks = { + .gro_receive = eth_gro_receive, + .gro_complete = eth_gro_complete, + }, +}; + +static int __init eth_offload_init(void) +{ + dev_add_offload(ð_packet_offload); + + return 0; +} + +fs_initcall(eth_offload_init); -- cgit v1.2.3 From 1891172aa5c32f08ad9931b794edd71e91a4a527 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Fri, 2 Jan 2015 20:22:03 +0100 Subject: timecounter: provide a macro to initialize the cyclecounter mask field. There is no need for users of the timecounter/cyclecounter code to include clocksource.h just for a single macro. Signed-off-by: Richard Cochran Signed-off-by: David S. Miller --- include/linux/timecounter.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 74f45496e6d1..4382035a75bb 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -19,6 +19,9 @@ #include +/* simplify initialization of mask field */ +#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) + /** * struct cyclecounter - hardware abstraction for a free running counter * Provides completely state-free accessors to the underlying hardware. 
@@ -29,7 +32,7 @@ * @read: returns the current cycle value * @mask: bitmask for two's complement * subtraction of non 64 bit counters, - * see CLOCKSOURCE_MASK() helper macro + * see CYCLECOUNTER_MASK() helper macro * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) */ -- cgit v1.2.3 From 8d24c0b43125ec26cc80e04588477a9a2afc025c Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:14 +0100 Subject: rhashtable: Do hashing inside of rhashtable_lookup_compare() Hash the key inside of rhashtable_lookup_compare() like rhashtable_lookup() does. This allows to simplify the hashing functions and keep them private. Signed-off-by: Thomas Graf Cc: netfilter-devel@vger.kernel.org Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 5 +-- lib/rhashtable.c | 91 +++++++++++++++------------------------------- net/netfilter/nft_hash.c | 46 ++++++++++++++--------- net/netlink/af_netlink.c | 5 +-- 4 files changed, 61 insertions(+), 86 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index b93fd89b2e5e..1b51221c6bbd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -96,9 +96,6 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); -u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); -u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); - void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, @@ -111,7 +108,7 @@ int rhashtable_expand(struct rhashtable *ht); int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(const struct rhashtable *ht, const void *key); -void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, +void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg); void rhashtable_destroy(const struct rhashtable *ht); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c3c723e902b..1ee0eb636ca3 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -42,69 +42,39 @@ static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) return (void *) he - ht->p.head_offset; } -static u32 __hashfn(const struct rhashtable *ht, const void *key, - u32 len, u32 hsize) +static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) { - u32 h; - - h = ht->p.hashfn(key, len, ht->p.hash_rnd); - - return h & (hsize - 1); -} - -/** - * rhashtable_hashfn - compute hash for key of given length - * @ht: hash table to compute for - * @key: pointer to key - * @len: length of key - * - * Computes the hash value using the hash function provided in the 'hashfn' - * of struct rhashtable_params. The returned value is guaranteed to be - * smaller than the number of buckets in the hash table. 
- */ -u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len) -{ - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - - return __hashfn(ht, key, len, tbl->size); + return hash & (tbl->size - 1); } -EXPORT_SYMBOL_GPL(rhashtable_hashfn); -static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize) +static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) { - if (unlikely(!ht->p.key_len)) { - u32 h; - - h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); + u32 hash; - return h & (hsize - 1); - } + if (unlikely(!ht->p.key_len)) + hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); + else + hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, + ht->p.hash_rnd); - return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize); + return hash; } -/** - * rhashtable_obj_hashfn - compute hash for hashed object - * @ht: hash table to compute for - * @ptr: pointer to hashed object - * - * Computes the hash value using the hash function `hashfn` respectively - * 'obj_hashfn' depending on whether the hash table is set up to work with - * a fixed length key. The returned value is guaranteed to be smaller than - * the number of buckets in the hash table. - */ -u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr) +static u32 key_hashfn(const struct rhashtable *ht, const void *key, u32 len) { struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + u32 hash; + + hash = ht->p.hashfn(key, len, ht->p.hash_rnd); - return obj_hashfn(ht, ptr, tbl->size); + return rht_bucket_index(tbl, hash); } -EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn); static u32 head_hashfn(const struct rhashtable *ht, - const struct rhash_head *he, u32 hsize) + const struct bucket_table *tbl, + const struct rhash_head *he) { - return obj_hashfn(ht, rht_obj(ht, he), hsize); + return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); } static struct bucket_table *bucket_table_alloc(size_t nbuckets) @@ -170,9 +140,9 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * reaches a node that doesn't hash to the same bucket as the * previous node p. Call the previous node p; */ - h = head_hashfn(ht, p, new_tbl->size); + h = head_hashfn(ht, new_tbl, p); rht_for_each(he, p->next, ht) { - if (head_hashfn(ht, he, new_tbl->size) != h) + if (head_hashfn(ht, new_tbl, he) != h) break; p = he; } @@ -184,7 +154,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, next = NULL; if (he) { rht_for_each(he, he->next, ht) { - if (head_hashfn(ht, he, new_tbl->size) == h) { + if (head_hashfn(ht, new_tbl, he) == h) { next = he; break; } @@ -237,9 +207,9 @@ int rhashtable_expand(struct rhashtable *ht) * single imprecise chain. 
*/ for (i = 0; i < new_tbl->size; i++) { - h = i & (old_tbl->size - 1); + h = rht_bucket_index(old_tbl, i); rht_for_each(he, old_tbl->buckets[h], ht) { - if (head_hashfn(ht, he, new_tbl->size) == i) { + if (head_hashfn(ht, new_tbl, he) == i) { RCU_INIT_POINTER(new_tbl->buckets[i], he); break; } @@ -353,7 +323,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) ASSERT_RHT_MUTEX(ht); - hash = head_hashfn(ht, obj, tbl->size); + hash = head_hashfn(ht, tbl, obj); RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); rcu_assign_pointer(tbl->buckets[hash], obj); ht->nelems++; @@ -413,7 +383,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) ASSERT_RHT_MUTEX(ht); - h = head_hashfn(ht, obj, tbl->size); + h = head_hashfn(ht, tbl, obj); pprev = &tbl->buckets[h]; rht_for_each(he, tbl->buckets[h], ht) { @@ -452,7 +422,7 @@ void *rhashtable_lookup(const struct rhashtable *ht, const void *key) BUG_ON(!ht->p.key_len); - h = __hashfn(ht, key, ht->p.key_len, tbl->size); + h = key_hashfn(ht, key, ht->p.key_len); rht_for_each_rcu(he, tbl->buckets[h], ht) { if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) @@ -467,7 +437,7 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup); /** * rhashtable_lookup_compare - search hash table with compare function * @ht: hash table - * @hash: hash value of desired entry + * @key: the pointer to the key * @compare: compare function, must return true on match * @arg: argument passed on to compare function * @@ -479,15 +449,14 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup); * * Returns the first entry on which the compare function returned true. */ -void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, +void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg) { const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); struct rhash_head *he; + u32 hash; - if (unlikely(hash >= tbl->size)) - return NULL; - + hash = key_hashfn(ht, key, ht->p.key_len); rht_for_each_rcu(he, tbl->buckets[hash], ht) { if (!compare(rht_obj(ht, he), arg)) continue; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 1e316ce4cb5d..614ee099ba36 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -94,28 +94,40 @@ static void nft_hash_remove(const struct nft_set *set, kfree(he); } +struct nft_compare_arg { + const struct nft_set *set; + struct nft_set_elem *elem; +}; + +static bool nft_hash_compare(void *ptr, void *arg) +{ + struct nft_hash_elem *he = ptr; + struct nft_compare_arg *x = arg; + + if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) { + x->elem->cookie = &he->node; + x->elem->flags = 0; + if (x->set->flags & NFT_SET_MAP) + nft_data_copy(&x->elem->data, he->data); + + return true; + } + + return false; +} + static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem) { const struct rhashtable *priv = nft_set_priv(set); - const struct bucket_table *tbl = rht_dereference_rcu(priv->tbl, priv); - struct rhash_head __rcu * const *pprev; - struct nft_hash_elem *he; - u32 h; - - h = rhashtable_hashfn(priv, &elem->key, set->klen); - pprev = &tbl->buckets[h]; - rht_for_each_entry_rcu(he, tbl->buckets[h], node) { - if (nft_data_cmp(&he->key, &elem->key, set->klen)) { - pprev = &he->node.next; - continue; - } + struct nft_compare_arg arg = { + .set = set, + .elem = elem, + }; - elem->cookie = (void *)pprev; - elem->flags = 0; - if (set->flags & NFT_SET_MAP) - nft_data_copy(&elem->data, he->data); + if 
(rhashtable_lookup_compare(priv, &elem->key, + &nft_hash_compare, &arg)) return 0; - } + return -ENOENT; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 84ea76ca3f1f..a5d7ed627563 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1002,11 +1002,8 @@ static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid, .net = net, .portid = portid, }; - u32 hash; - hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid)); - - return rhashtable_lookup_compare(&table->hash, hash, + return rhashtable_lookup_compare(&table->hash, &portid, &netlink_compare, &arg); } -- cgit v1.2.3 From 88d6ed15acff1cb44b1d1f3c0a393b7f7744957a Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:16 +0100 Subject: rhashtable: Convert bucket iterators to take table and index This patch is in preparation to introduce per bucket spinlocks. It extends all iterator macros to take the bucket table and bucket index. It also introduces a new rht_dereference_bucket() to handle protected accesses to buckets. It introduces a barrier() to the RCU iterators to the prevent the compiler from caching the first element. The lockdep verifier is introduced as stub which always succeeds and properly implement in the next patch when the locks are introduced. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 173 ++++++++++++++++++++++++++++++--------------- lib/rhashtable.c | 30 +++++--- net/netfilter/nft_hash.c | 12 ++-- net/netlink/af_netlink.c | 12 ++-- net/netlink/diag.c | 4 +- 5 files changed, 152 insertions(+), 79 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 1b51221c6bbd..b54e24a08806 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -87,11 +87,18 @@ struct rhashtable { #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(const struct rhashtable *ht); +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); #else static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) { return 1; } + +static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, + u32 hash) +{ + return 1; +} #endif /* CONFIG_PROVE_LOCKING */ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); @@ -119,92 +126,144 @@ void rhashtable_destroy(const struct rhashtable *ht); #define rht_dereference_rcu(p, ht) \ rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) -#define rht_entry(ptr, type, member) container_of(ptr, type, member) -#define rht_entry_safe(ptr, type, member) \ -({ \ - typeof(ptr) __ptr = (ptr); \ - __ptr ? rht_entry(__ptr, type, member) : NULL; \ -}) +#define rht_dereference_bucket(p, tbl, hash) \ + rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) -#define rht_next_entry_safe(pos, ht, member) \ -({ \ - pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \ - typeof(*(pos)), member) : NULL; \ -}) +#define rht_dereference_bucket_rcu(p, tbl, hash) \ + rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) + +#define rht_entry(tpos, pos, member) \ + ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) /** - * rht_for_each - iterate over hash chain - * @pos: &struct rhash_head to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable + * rht_for_each_continue - continue iterating over hash chain + * @pos: the &struct rhash_head to use as a loop cursor. 
+ * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index */ -#define rht_for_each(pos, head, ht) \ - for (pos = rht_dereference(head, ht); \ +#define rht_for_each_continue(pos, head, tbl, hash) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ pos; \ - pos = rht_dereference((pos)->next, ht)) + pos = rht_dereference_bucket((pos)->next, tbl, hash)) + +/** + * rht_for_each - iterate over hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + */ +#define rht_for_each(pos, tbl, hash) \ + rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) + +/** + * rht_for_each_entry_continue - continue iterating over hash chain + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + */ +#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ + pos && rht_entry(tpos, pos, member); \ + pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each_entry - iterate over hash chain of given type - * @pos: type * to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable - * @member: name of the rhash_head within the hashable struct. + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. */ -#define rht_for_each_entry(pos, head, ht, member) \ - for (pos = rht_entry_safe(rht_dereference(head, ht), \ - typeof(*(pos)), member); \ - pos; \ - pos = rht_next_entry_safe(pos, ht, member)) +#define rht_for_each_entry(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ + tbl, hash, member) /** * rht_for_each_entry_safe - safely iterate over hash chain of given type - * @pos: type * to use as a loop cursor. - * @n: type * to use for temporary next object storage - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable - * @member: name of the rhash_head within the hashable struct. + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @next: the &struct rhash_head to use as next in loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. */ -#define rht_for_each_entry_safe(pos, n, head, ht, member) \ - for (pos = rht_entry_safe(rht_dereference(head, ht), \ - typeof(*(pos)), member), \ - n = rht_next_entry_safe(pos, ht, member); \ - pos; \ - pos = n, \ - n = rht_next_entry_safe(pos, ht, member)) +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ + next = pos ? 
rht_dereference_bucket(pos->next, tbl, hash) \ + : NULL; \ + pos && rht_entry(tpos, pos, member); \ + pos = next) + +/** + * rht_for_each_rcu_continue - continue iterating over rcu hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_rcu_continue(pos, head, tbl, hash) \ + for (({barrier(); }), \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ + pos; \ + pos = rcu_dereference_raw(pos->next)) /** * rht_for_each_rcu - iterate over rcu hash chain - * @pos: &struct rhash_head to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index * * This hash chain list-traversal primitive may safely run concurrently with - * the _rcu fkht mutation primitives such as rht_insert() as long as the + * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_rcu(pos, head, ht) \ - for (pos = rht_dereference_rcu(head, ht); \ - pos; \ - pos = rht_dereference_rcu((pos)->next, ht)) +#define rht_for_each_rcu(pos, tbl, hash) \ + rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) + +/** + * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ + for (({barrier(); }), \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ + pos && rht_entry(tpos, pos, member); \ + pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) /** * rht_for_each_entry_rcu - iterate over rcu hash chain of given type - * @pos: type * to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @member: name of the rhash_head within the hashable struct. + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with - * the _rcu fkht mutation primitives such as rht_insert() as long as the + * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). 
*/ -#define rht_for_each_entry_rcu(pos, head, member) \ - for (pos = rht_entry_safe(rcu_dereference_raw(head), \ - typeof(*(pos)), member); \ - pos; \ - pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \ - typeof(*(pos)), member)) +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ + tbl, hash, member) #endif /* _LINUX_RHASHTABLE_H */ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index b658245826a1..ce450d095fdf 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -35,6 +35,12 @@ int lockdep_rht_mutex_is_held(const struct rhashtable *ht) return ht->p.mutex_is_held(ht->p.parent); } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); + +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) +{ + return 1; +} +EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #endif static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) @@ -141,7 +147,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * previous node p. Call the previous node p; */ h = head_hashfn(ht, new_tbl, p); - rht_for_each(he, p->next, ht) { + rht_for_each_continue(he, p->next, old_tbl, n) { if (head_hashfn(ht, new_tbl, he) != h) break; p = he; @@ -153,7 +159,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, */ next = NULL; if (he) { - rht_for_each(he, he->next, ht) { + rht_for_each_continue(he, he->next, old_tbl, n) { if (head_hashfn(ht, new_tbl, he) == h) { next = he; break; @@ -208,7 +214,7 @@ int rhashtable_expand(struct rhashtable *ht) */ for (i = 0; i < new_tbl->size; i++) { h = rht_bucket_index(old_tbl, i); - rht_for_each(he, old_tbl->buckets[h], ht) { + rht_for_each(he, old_tbl, h) { if (head_hashfn(ht, new_tbl, he) == i) { RCU_INIT_POINTER(new_tbl->buckets[i], he); break; @@ -286,7 +292,7 @@ int rhashtable_shrink(struct rhashtable *ht) * to the new bucket. 
*/ for (pprev = &ntbl->buckets[i]; *pprev != NULL; - pprev = &rht_dereference(*pprev, ht)->next) + pprev = &rht_dereference_bucket(*pprev, ntbl, i)->next) ; RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); } @@ -386,7 +392,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) h = head_hashfn(ht, tbl, obj); pprev = &tbl->buckets[h]; - rht_for_each(he, tbl->buckets[h], ht) { + rht_for_each(he, tbl, h) { if (he != obj) { pprev = &he->next; continue; @@ -423,7 +429,7 @@ void *rhashtable_lookup(const struct rhashtable *ht, const void *key) BUG_ON(!ht->p.key_len); h = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl->buckets[h], ht) { + rht_for_each_rcu(he, tbl, h) { if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) continue; @@ -457,7 +463,7 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, u32 hash; hash = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl->buckets[hash], ht) { + rht_for_each_rcu(he, tbl, hash) { if (!compare(rht_obj(ht, he), arg)) continue; return rht_obj(ht, he); @@ -625,6 +631,7 @@ static int __init test_rht_lookup(struct rhashtable *ht) static void test_bucket_stats(struct rhashtable *ht, bool quiet) { unsigned int cnt, rcu_cnt, i, total = 0; + struct rhash_head *pos; struct test_obj *obj; struct bucket_table *tbl; @@ -635,14 +642,14 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) if (!quiet) pr_info(" [%#4x/%zu]", i, tbl->size); - rht_for_each_entry_rcu(obj, tbl->buckets[i], node) { + rht_for_each_entry_rcu(obj, pos, tbl, i, node) { cnt++; total++; if (!quiet) pr_cont(" [%p],", obj); } - rht_for_each_entry_rcu(obj, tbl->buckets[i], node) + rht_for_each_entry_rcu(obj, pos, tbl, i, node) rcu_cnt++; if (rcu_cnt != cnt) @@ -664,7 +671,8 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) static int __init test_rhashtable(struct rhashtable *ht) { struct bucket_table *tbl; - struct test_obj *obj, *next; + struct test_obj *obj; + struct rhash_head *pos, *next; int err; unsigned int i; @@ -733,7 +741,7 @@ static int __init test_rhashtable(struct rhashtable *ht) error: tbl = rht_dereference_rcu(ht->tbl, ht); for (i = 0; i < tbl->size; i++) - rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node) + rht_for_each_entry_safe(obj, pos, next, tbl, i, node) kfree(obj); return err; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 614ee099ba36..d93f1f4c22a9 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -142,7 +142,9 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, tbl = rht_dereference_rcu(priv->tbl, priv); for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(he, tbl->buckets[i], node) { + struct rhash_head *pos; + + rht_for_each_entry_rcu(he, pos, tbl, i, node) { if (iter->count < iter->skip) goto cont; @@ -197,15 +199,13 @@ static void nft_hash_destroy(const struct nft_set *set) { const struct rhashtable *priv = nft_set_priv(set); const struct bucket_table *tbl = priv->tbl; - struct nft_hash_elem *he, *next; + struct nft_hash_elem *he; + struct rhash_head *pos, *next; unsigned int i; for (i = 0; i < tbl->size; i++) { - for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node); - he != NULL; he = next) { - next = rht_entry(he->node.next, struct nft_hash_elem, node); + rht_for_each_entry_safe(he, pos, next, tbl, i, node) nft_hash_elem_destroy(set, he); - } } rhashtable_destroy(priv); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c 
index a5d7ed627563..57449b6089c2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2898,7 +2898,9 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); for (j = 0; j < tbl->size; j++) { - rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) { + struct rhash_head *node; + + rht_for_each_entry_rcu(nlk, node, tbl, j, node) { s = (struct sock *)nlk; if (sock_net(s) != seq_file_net(seq)) @@ -2926,6 +2928,8 @@ static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct rhashtable *ht; + const struct bucket_table *tbl; + struct rhash_head *node; struct netlink_sock *nlk; struct nl_seq_iter *iter; struct net *net; @@ -2942,17 +2946,17 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) i = iter->link; ht = &nl_table[i].hash; - rht_for_each_entry(nlk, nlk->node.next, ht, node) + tbl = rht_dereference_rcu(ht->tbl, ht); + rht_for_each_entry_rcu_continue(nlk, node, nlk->node.next, tbl, iter->hash_idx, node) if (net_eq(sock_net((struct sock *)nlk), net)) return nlk; j = iter->hash_idx + 1; do { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); for (; j < tbl->size; j++) { - rht_for_each_entry(nlk, tbl->buckets[j], ht, node) { + rht_for_each_entry_rcu(nlk, node, tbl, j, node) { if (net_eq(sock_net((struct sock *)nlk), net)) { iter->link = i; iter->hash_idx = j; diff --git a/net/netlink/diag.c b/net/netlink/diag.c index de8c74a3c061..fcca36d81a62 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -113,7 +113,9 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, req = nlmsg_data(cb->nlh); for (i = 0; i < htbl->size; i++) { - rht_for_each_entry(nlsk, htbl->buckets[i], ht, node) { + struct rhash_head *pos; + + rht_for_each_entry(nlsk, pos, htbl, i, node) { sk = (struct sock *)nlsk; if (!net_eq(sock_net(sk), net)) -- cgit v1.2.3 From 897362e446436d245972e72c6bc5b33bd7a5c659 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:18 +0100 Subject: nft_hash: Remove rhashtable_remove_pprev() The removal function of nft_hash currently stores a reference to the previous element during lookup which is used to optimize removal later on. This was possible because a lock is held throughout calling rhashtable_lookup() and rhashtable_remove(). With the introduction of deferred table resizing in parallel to lookups and insertions, the nftables lock will no longer synchronize all table mutations and the stored pprev may become invalid. Removing this optimization makes removal slightly more expensive on average but allows taking the resize cost out of the insert and remove path. Signed-off-by: Thomas Graf Cc: netfilter-devel@vger.kernel.org Signed-off-by: David S.
Miller --- include/linux/rhashtable.h | 2 -- lib/rhashtable.c | 34 +++++++--------------------------- net/netfilter/nft_hash.c | 11 +++-------- 3 files changed, 10 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index b54e24a08806..f624d4b5045f 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -105,8 +105,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); -void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 0bd29c178910..e6b85c4a5828 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -344,32 +344,6 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) } EXPORT_SYMBOL_GPL(rhashtable_insert); -/** - * rhashtable_remove_pprev - remove object from hash table given previous element - * @ht: hash table - * @obj: pointer to hash head inside object - * @pprev: pointer to previous element - * - * Identical to rhashtable_remove() but caller is alreayd aware of the element - * in front of the element to be deleted. This is in particular useful for - * deletion when combined with walking or lookup. - */ -void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev) -{ - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); - - ASSERT_RHT_MUTEX(ht); - - RCU_INIT_POINTER(*pprev, obj->next); - ht->nelems--; - - if (ht->p.shrink_decision && - ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht); -} -EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); - /** * rhashtable_remove - remove object from hash table * @ht: hash table @@ -403,7 +377,13 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) continue; } - rhashtable_remove_pprev(ht, he, pprev); + RCU_INIT_POINTER(*pprev, he->next); + ht->nelems--; + + if (ht->p.shrink_decision && + ht->p.shrink_decision(ht, tbl->size)) + rhashtable_shrink(ht); + return true; } diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index d93f1f4c22a9..7f903cf9a1b9 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -83,15 +83,10 @@ static void nft_hash_remove(const struct nft_set *set, const struct nft_set_elem *elem) { struct rhashtable *priv = nft_set_priv(set); - struct rhash_head *he, __rcu **pprev; - - pprev = elem->cookie; - he = rht_dereference((*pprev), priv); - - rhashtable_remove_pprev(priv, he, pprev); + rhashtable_remove(priv, elem->cookie); synchronize_rcu(); - kfree(he); + kfree(elem->cookie); } struct nft_compare_arg { @@ -105,7 +100,7 @@ static bool nft_hash_compare(void *ptr, void *arg) struct nft_compare_arg *x = arg; if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) { - x->elem->cookie = &he->node; + x->elem->cookie = he; x->elem->flags = 0; if (x->set->flags & NFT_SET_MAP) nft_data_copy(&x->elem->data, he->data); -- cgit v1.2.3 From 113948d841e8d78039e5dbbb5248f5b73e99eafa Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:19 +0100 Subject: spinlock: Add spin_lock_bh_nested() Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/linux/spinlock.h | 8 ++++++++ include/linux/spinlock_api_smp.h | 2 ++ include/linux/spinlock_api_up.h | 1 + kernel/locking/spinlock.c | 8 ++++++++ 4 files changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 262ba4ef9a8e..3e18379dfa6f 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) +# define raw_spin_lock_bh_nested(lock, subclass) \ + _raw_spin_lock_bh_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ @@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) +# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -324,6 +327,11 @@ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) +#define spin_lock_bh_nested(lock, subclass) \ +do { \ + raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\ +} while (0) + #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 42dfab89e740..5344268e6e62 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) __acquires(lock); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index d0d188861ad6..d3afef9d8dbe 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -57,6 +57,7 @@ #define _raw_spin_lock(lock) __LOCK(lock) #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock) #define _raw_read_lock(lock) __LOCK(lock) #define _raw_write_lock(lock) __LOCK(lock) #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 4b082b5cac9e..db3ccb1dd614 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -363,6 +363,14 @@ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) } EXPORT_SYMBOL(_raw_spin_lock_nested); +void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass) +{ + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} +EXPORT_SYMBOL(_raw_spin_lock_bh_nested); + unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) { -- cgit v1.2.3 From 97defe1ecf868b8127f8e62395499d6a06e4c4b1 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:20 +0100 Subject: rhashtable: Per bucket locks & deferred expansion/shrinking 
Introduces an array of spinlocks to protect bucket mutations. The number of spinlocks per CPU is configurable and selected based on the hash of the bucket. This allows for parallel insertions and removals of entries which do not share a lock. The patch also defers expansion and shrinking to a worker queue which allows insertion and removal from atomic context. Insertions and deletions may occur in parallel to it and are only held up briefly while the particular bucket is linked or unzipped. Mutations of the bucket table pointer are protected by a new mutex; read access is RCU protected. In the event of an expansion or shrinking, the newly allocated bucket table is exposed as a so-called future table as soon as the resize process starts. Lookups, deletions, and insertions will briefly use both tables. The future table becomes the main table after an RCU grace period, once the initial linking of the old table to the new one has been performed. Optimization of the chains to make use of the new number of buckets follows only once the new table is in use. The side effect of this is that during that RCU grace period, a bucket traversal using any rht_for_each() variant on the main table will not see any insertions performed during the RCU grace period which would at that point land in the future table. The lookup will see them as it searches both tables if needed. Having multiple insertions and removals occur in parallel requires nelems to become an atomic counter. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 37 ++-- lib/rhashtable.c | 458 ++++++++++++++++++++++++++++++++++----------- net/netfilter/nft_hash.c | 27 ++- net/netlink/af_netlink.c | 15 +- 4 files changed, 384 insertions(+), 153 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index f624d4b5045f..a1688f0a6193 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -19,6 +19,7 @@ #define _LINUX_RHASHTABLE_H #include +#include struct rhash_head { struct rhash_head __rcu *next; @@ -26,8 +27,17 @@ struct rhash_head { #define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) +/** + * struct bucket_table - Table of hash buckets + * @size: Number of hash buckets + * @locks_mask: Mask to apply before accessing locks[] + * @locks: Array of spinlocks protecting individual buckets + * @buckets: size * hash buckets + */ struct bucket_table { size_t size; + unsigned int locks_mask; + spinlock_t *locks; struct rhash_head __rcu *buckets[]; }; @@ -45,11 +55,11 @@ struct rhashtable; * @hash_rnd: Seed to use while hashing * @max_shift: Maximum number of shifts while expanding * @min_shift: Minimum number of shifts while shrinking + * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @hashfn: Function to hash key * @obj_hashfn: Function to hash object * @grow_decision: If defined, may return true if table should expand * @shrink_decision: If defined, may return true if table should shrink - * @mutex_is_held: Must return true if protecting mutex is held */ struct rhashtable_params { size_t nelem_hint; @@ -59,37 +69,42 @@ struct rhashtable_params { u32 hash_rnd; size_t max_shift; size_t min_shift; + size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; bool (*grow_decision)(const struct rhashtable *ht, size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); -#ifdef CONFIG_PROVE_LOCKING - int (*mutex_is_held)(void *parent); - void *parent; -#endif }; /** * struct rhashtable - Hash table handle *
@tbl: Bucket table + * @future_tbl: Table under construction during expansion/shrinking * @nelems: Number of elements in table * @shift: Current size (1 << shift) * @p: Configuration parameters + * @run_work: Deferred worker to expand/shrink asynchronously + * @mutex: Mutex to protect current/future table swapping + * @being_destroyed: True if table is set up for destruction */ struct rhashtable { struct bucket_table __rcu *tbl; - size_t nelems; + struct bucket_table __rcu *future_tbl; + atomic_t nelems; size_t shift; struct rhashtable_params p; + struct delayed_work run_work; + struct mutex mutex; + bool being_destroyed; }; #ifdef CONFIG_PROVE_LOCKING -int lockdep_rht_mutex_is_held(const struct rhashtable *ht); +int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); #else -static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) +static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return 1; } @@ -112,11 +127,11 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); int rhashtable_expand(struct rhashtable *ht); int rhashtable_shrink(struct rhashtable *ht); -void *rhashtable_lookup(const struct rhashtable *ht, const void *key); -void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, +void *rhashtable_lookup(struct rhashtable *ht, const void *key); +void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg); -void rhashtable_destroy(const struct rhashtable *ht); +void rhashtable_destroy(struct rhashtable *ht); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e6b85c4a5828..312e3437c7bc 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -26,19 +26,42 @@ #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4UL +#define BUCKET_LOCKS_PER_CPU 128UL + +enum { + RHT_LOCK_NORMAL, + RHT_LOCK_NESTED, + RHT_LOCK_NESTED2, +}; + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. + */ +static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; +} #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) +#define ASSERT_BUCKET_LOCK(TBL, HASH) \ + BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH)) #ifdef CONFIG_PROVE_LOCKING -int lockdep_rht_mutex_is_held(const struct rhashtable *ht) +int lockdep_rht_mutex_is_held(struct rhashtable *ht) { - return ht->p.mutex_is_held(ht->p.parent); + return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { - return 1; + spinlock_t *lock = bucket_lock(tbl, hash); + + return (debug_locks) ? 
lockdep_is_held(lock) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #endif @@ -66,7 +89,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) return hash; } -static u32 key_hashfn(const struct rhashtable *ht, const void *key, u32 len) +static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) { struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); u32 hash; @@ -95,7 +118,49 @@ static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) return pprev; } -static struct bucket_table *bucket_table_alloc(size_t nbuckets) +static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) +{ + unsigned int i, size; +#if defined(CONFIG_PROVE_LOCKING) + unsigned int nr_pcpus = 2; +#else + unsigned int nr_pcpus = num_possible_cpus(); +#endif + + nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); + size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); + + /* Never allocate more than one lock per bucket */ + size = min_t(unsigned int, size, tbl->size); + + if (sizeof(spinlock_t) != 0) { +#ifdef CONFIG_NUMA + if (size * sizeof(spinlock_t) > PAGE_SIZE) + tbl->locks = vmalloc(size * sizeof(spinlock_t)); + else +#endif + tbl->locks = kmalloc_array(size, sizeof(spinlock_t), + GFP_KERNEL); + if (!tbl->locks) + return -ENOMEM; + for (i = 0; i < size; i++) + spin_lock_init(&tbl->locks[i]); + } + tbl->locks_mask = size - 1; + + return 0; +} + +static void bucket_table_free(const struct bucket_table *tbl) +{ + if (tbl) + kvfree(tbl->locks); + + kvfree(tbl); +} + +static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets) { struct bucket_table *tbl; size_t size; @@ -110,12 +175,12 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets) tbl->size = nbuckets; - return tbl; -} + if (alloc_bucket_locks(ht, tbl) < 0) { + bucket_table_free(tbl); + return NULL; + } -static void bucket_table_free(const struct bucket_table *tbl) -{ - kvfree(tbl); + return tbl; } /** @@ -126,7 +191,7 @@ static void bucket_table_free(const struct bucket_table *tbl) bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ - return ht->nelems > (new_size / 4 * 3); + return atomic_read(&ht->nelems) > (new_size / 4 * 3); } EXPORT_SYMBOL_GPL(rht_grow_above_75); @@ -138,41 +203,59 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) { /* Shrink table beneath 30% load */ - return ht->nelems < (new_size * 3 / 10); + return atomic_read(&ht->nelems) < (new_size * 3 / 10); } EXPORT_SYMBOL_GPL(rht_shrink_below_30); static void hashtable_chain_unzip(const struct rhashtable *ht, const struct bucket_table *new_tbl, - struct bucket_table *old_tbl, size_t n) + struct bucket_table *old_tbl, + size_t old_hash) { struct rhash_head *he, *p, *next; - unsigned int h; + spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL; + unsigned int new_hash, new_hash2; + + ASSERT_BUCKET_LOCK(old_tbl, old_hash); /* Old bucket empty, no work needed. */ - p = rht_dereference(old_tbl->buckets[n], ht); + p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, + old_hash); if (!p) return; + new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); + new_bucket_lock = bucket_lock(new_tbl, new_hash); + /* Advance the old bucket pointer one or more times until it * reaches a node that doesn't hash to the same bucket as the * previous node p. 
Call the previous node p; */ - h = head_hashfn(ht, new_tbl, p); - rht_for_each_continue(he, p->next, old_tbl, n) { - if (head_hashfn(ht, new_tbl, he) != h) + rht_for_each_continue(he, p->next, old_tbl, old_hash) { + new_hash2 = head_hashfn(ht, new_tbl, he); + if (new_hash != new_hash2) break; p = he; } - RCU_INIT_POINTER(old_tbl->buckets[n], p->next); + rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); + + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + + /* If we have encountered an entry that maps to a different bucket in + * the new table, lock down that bucket as well as we might cut off + * the end of the chain. + */ + new_bucket_lock2 = bucket_lock(new_tbl, new_hash); + if (new_bucket_lock != new_bucket_lock2) + spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2); /* Find the subsequent node which does hash to the same * bucket as node P, or NULL if no such node exists. */ next = NULL; if (he) { - rht_for_each_continue(he, he->next, old_tbl, n) { - if (head_hashfn(ht, new_tbl, he) == h) { + rht_for_each_continue(he, he->next, old_tbl, old_hash) { + if (head_hashfn(ht, new_tbl, he) == new_hash) { next = he; break; } @@ -182,7 +265,23 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Set p's next pointer to that subsequent node pointer, * bypassing the nodes which do not hash to p's bucket */ - RCU_INIT_POINTER(p->next, next); + rcu_assign_pointer(p->next, next); + + if (new_bucket_lock != new_bucket_lock2) + spin_unlock_bh(new_bucket_lock2); + spin_unlock_bh(new_bucket_lock); +} + +static void link_old_to_new(struct bucket_table *new_tbl, + unsigned int new_hash, struct rhash_head *entry) +{ + spinlock_t *new_bucket_lock; + + new_bucket_lock = bucket_lock(new_tbl, new_hash); + + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); + spin_unlock_bh(new_bucket_lock); } /** @@ -195,43 +294,59 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. * - * The caller must ensure that no concurrent table mutations take place. - * It is however valid to have concurrent lookups if they are RCU protected. + * The caller must ensure that no concurrent resizing occurs by holding + * ht->mutex. + * + * It is valid to have concurrent insertions and deletions protected by per + * bucket locks or concurrent RCU protected lookups and traversals. */ int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_head *he; - unsigned int i, h; - bool complete; + spinlock_t *old_bucket_lock; + unsigned int new_hash, old_hash; + bool complete = false; ASSERT_RHT_MUTEX(ht); if (ht->p.max_shift && ht->shift >= ht->p.max_shift) return 0; - new_tbl = bucket_table_alloc(old_tbl->size * 2); + new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; ht->shift++; - /* For each new bucket, search the corresponding old bucket - * for the first entry that hashes to the new bucket, and - * link the new bucket to that entry. Since all the entries - * which will end up in the new bucket appear in the same - * old bucket, this constructs an entirely valid new hash - * table, but with multiple buckets "zipped" together into a - * single imprecise chain. + /* Make insertions go into the new, empty table right away. 
Deletions + * and lookups will be attempted in both tables until we synchronize. + * The synchronize_rcu() guarantees for the new table to be picked up + * so no new additions go into the old table while we relink. + */ + rcu_assign_pointer(ht->future_tbl, new_tbl); + synchronize_rcu(); + + /* For each new bucket, search the corresponding old bucket for the + * first entry that hashes to the new bucket, and link the end of + * newly formed bucket chain (containing entries added to future + * table) to that entry. Since all the entries which will end up in + * the new bucket appear in the same old bucket, this constructs an + * entirely valid new hash table, but with multiple buckets + * "zipped" together into a single imprecise chain. */ - for (i = 0; i < new_tbl->size; i++) { - h = rht_bucket_index(old_tbl, i); - rht_for_each(he, old_tbl, h) { - if (head_hashfn(ht, new_tbl, he) == i) { - RCU_INIT_POINTER(new_tbl->buckets[i], he); + for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { + old_hash = rht_bucket_index(old_tbl, new_hash); + old_bucket_lock = bucket_lock(old_tbl, old_hash); + + spin_lock_bh(old_bucket_lock); + rht_for_each(he, old_tbl, old_hash) { + if (head_hashfn(ht, new_tbl, he) == new_hash) { + link_old_to_new(new_tbl, new_hash, he); break; } } + spin_unlock_bh(old_bucket_lock); } /* Publish the new table pointer. Lookups may now traverse @@ -241,7 +356,7 @@ int rhashtable_expand(struct rhashtable *ht) rcu_assign_pointer(ht->tbl, new_tbl); /* Unzip interleaved hash chains */ - do { + while (!complete && !ht->being_destroyed) { /* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. @@ -253,12 +368,17 @@ int rhashtable_expand(struct rhashtable *ht) * table): ... */ complete = true; - for (i = 0; i < old_tbl->size; i++) { - hashtable_chain_unzip(ht, new_tbl, old_tbl, i); - if (old_tbl->buckets[i] != NULL) + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + old_bucket_lock = bucket_lock(old_tbl, old_hash); + spin_lock_bh(old_bucket_lock); + + hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); + if (old_tbl->buckets[old_hash] != NULL) complete = false; + + spin_unlock_bh(old_bucket_lock); } - } while (!complete); + } bucket_table_free(old_tbl); return 0; @@ -272,38 +392,65 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. * + * The caller must ensure that no concurrent resizing occurs by holding + * ht->mutex. + * * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. + * + * It is valid to have concurrent insertions and deletions protected by per + * bucket locks or concurrent RCU protected lookups and traversals. 
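 The lock ordering this relies on while both tables are live is the one documented at the top of lib/rhashtable.c: the old table's bucket lock always comes first, and locks on the new table nest under it with distinct lockdep subclasses. A hedged sketch of that pattern (relink_one_bucket() is a hypothetical helper; bucket_lock() and the RHT_LOCK_* subclasses are the ones defined in this patch):

	static void relink_one_bucket(struct bucket_table *old_tbl,
				      struct bucket_table *new_tbl,
				      u32 old_hash, u32 new_hash)
	{
		spinlock_t *old_lock = bucket_lock(old_tbl, old_hash);
		spinlock_t *new_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_lock);		/* old table first, always */
		spin_lock_bh_nested(new_lock, RHT_LOCK_NESTED);

		/* ... relink entries between the two chains ... */

		spin_unlock_bh(new_lock);
		spin_unlock_bh(old_lock);
	}
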
*/ int rhashtable_shrink(struct rhashtable *ht) { - struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); - unsigned int i; + struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); + spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2; + unsigned int new_hash; ASSERT_RHT_MUTEX(ht); if (ht->shift <= ht->p.min_shift) return 0; - ntbl = bucket_table_alloc(tbl->size / 2); - if (ntbl == NULL) + new_tbl = bucket_table_alloc(ht, tbl->size / 2); + if (new_tbl == NULL) return -ENOMEM; - ht->shift--; + rcu_assign_pointer(ht->future_tbl, new_tbl); + synchronize_rcu(); - /* Link each bucket in the new table to the first bucket - * in the old table that contains entries which will hash - * to the new bucket. + /* Link the first entry in the old bucket to the end of the + * bucket in the new table. As entries are concurrently being + * added to the new table, lock down the new bucket. As we + * always divide the size in half when shrinking, each bucket + * in the new table maps to exactly two buckets in the old + * table. + * + * As removals can occur concurrently on the old table, we need + * to lock down both matching buckets in the old table. */ - for (i = 0; i < ntbl->size; i++) { - ntbl->buckets[i] = tbl->buckets[i]; - RCU_INIT_POINTER(*bucket_tail(ntbl, i), - tbl->buckets[i + ntbl->size]); - + for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { + old_bucket_lock1 = bucket_lock(tbl, new_hash); + old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size); + new_bucket_lock = bucket_lock(new_tbl, new_hash); + + spin_lock_bh(old_bucket_lock1); + spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED); + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2); + + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), + tbl->buckets[new_hash]); + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), + tbl->buckets[new_hash + new_tbl->size]); + + spin_unlock_bh(new_bucket_lock); + spin_unlock_bh(old_bucket_lock2); + spin_unlock_bh(old_bucket_lock1); } /* Publish the new, valid hash table */ - rcu_assign_pointer(ht->tbl, ntbl); + rcu_assign_pointer(ht->tbl, new_tbl); + ht->shift--; /* Wait for readers. No new readers will have references to the * old hash table. @@ -316,31 +463,63 @@ int rhashtable_shrink(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_shrink); +static void rht_deferred_worker(struct work_struct *work) +{ + struct rhashtable *ht; + struct bucket_table *tbl; + + ht = container_of(work, struct rhashtable, run_work.work); + mutex_lock(&ht->mutex); + tbl = rht_dereference(ht->tbl, ht); + + if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) + rhashtable_expand(ht); + else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) + rhashtable_shrink(ht); + + mutex_unlock(&ht->mutex); +} + /** * rhashtable_insert - insert object into hash hash table * @ht: hash table * @obj: pointer to hash head inside object * - * Will automatically grow the table via rhashtable_expand() if the the - * grow_decision function specified at rhashtable_init() returns true. + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. * - * The caller must ensure that no concurrent table mutations occur. It is - * however valid to have concurrent lookups if they are RCU protected. + * It is safe to call this function from atomic context. 
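 In practice this means a caller may now insert from softirq or other atomic context without an external mutex; a hedged sketch (add_obj() and struct test_obj are hypothetical, modeled on the self-test):

	static int add_obj(struct rhashtable *ht, int value)
	{
		struct test_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);

		if (!obj)
			return -ENOMEM;

		obj->value = value;
		rhashtable_insert(ht, &obj->node);	/* per-bucket lock only */
		return 0;
	}
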
+ * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). */ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); - u32 hash; + struct bucket_table *tbl; + spinlock_t *lock; + unsigned hash; - ASSERT_RHT_MUTEX(ht); + rcu_read_lock(); + tbl = rht_dereference_rcu(ht->future_tbl, ht); hash = head_hashfn(ht, tbl, obj); + lock = bucket_lock(tbl, hash); + + spin_lock_bh(lock); RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); rcu_assign_pointer(tbl->buckets[hash], obj); - ht->nelems++; + spin_unlock_bh(lock); - if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) - rhashtable_expand(ht); + atomic_inc(&ht->nelems); + + /* Only grow the table if no resizing is currently in progress. */ + if (ht->tbl != ht->future_tbl && + ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) + schedule_delayed_work(&ht->run_work, 0); + + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_insert); @@ -361,32 +540,56 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); */ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *tbl; struct rhash_head __rcu **pprev; struct rhash_head *he; - u32 h; + spinlock_t *lock; + unsigned int hash; - ASSERT_RHT_MUTEX(ht); + rcu_read_lock(); + tbl = rht_dereference_rcu(ht->tbl, ht); + hash = head_hashfn(ht, tbl, obj); - h = head_hashfn(ht, tbl, obj); + lock = bucket_lock(tbl, hash); + spin_lock_bh(lock); - pprev = &tbl->buckets[h]; - rht_for_each(he, tbl, h) { +restart: + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { if (he != obj) { pprev = &he->next; continue; } - RCU_INIT_POINTER(*pprev, he->next); - ht->nelems--; + rcu_assign_pointer(*pprev, obj->next); + atomic_dec(&ht->nelems); - if (ht->p.shrink_decision && + spin_unlock_bh(lock); + + if (ht->tbl != ht->future_tbl && + ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht); + schedule_delayed_work(&ht->run_work, 0); + + rcu_read_unlock(); return true; } + if (tbl != rht_dereference_rcu(ht->tbl, ht)) { + spin_unlock_bh(lock); + + tbl = rht_dereference_rcu(ht->tbl, ht); + hash = head_hashfn(ht, tbl, obj); + + lock = bucket_lock(tbl, hash); + spin_lock_bh(lock); + goto restart; + } + + spin_unlock_bh(lock); + rcu_read_unlock(); + return false; } EXPORT_SYMBOL_GPL(rhashtable_remove); @@ -402,25 +605,35 @@ EXPORT_SYMBOL_GPL(rhashtable_remove); * This lookup function may only be used for fixed key hash table (key_len * paramter set). It will BUG() if used inappropriately. * - * Lookups may occur in parallel with hash mutations as long as the lookup is - * guarded by rcu_read_lock(). The caller must take care of this. + * Lookups may occur in parallel with hashtable mutations and resizing. 
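 A corresponding lookup needs no caller-side synchronization either, since the function now takes rcu_read_lock() itself and falls back to the second table during a resize. A hedged sketch (has_key() is hypothetical; the int key matches the fixed-key self-test configuration):

	static bool has_key(struct rhashtable *ht, int key)
	{
		struct test_obj *obj;

		obj = rhashtable_lookup(ht, &key);
		return obj != NULL;
	}
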
*/ -void *rhashtable_lookup(const struct rhashtable *ht, const void *key) +void *rhashtable_lookup(struct rhashtable *ht, const void *key) { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + const struct bucket_table *tbl, *old_tbl; struct rhash_head *he; - u32 h; + u32 hash; BUG_ON(!ht->p.key_len); - h = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl, h) { + rcu_read_lock(); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = rht_dereference_rcu(ht->future_tbl, ht); + hash = key_hashfn(ht, key, ht->p.key_len); +restart: + rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) continue; + rcu_read_unlock(); return rht_obj(ht, he); } + if (unlikely(tbl != old_tbl)) { + tbl = old_tbl; + goto restart; + } + + rcu_read_unlock(); return NULL; } EXPORT_SYMBOL_GPL(rhashtable_lookup); @@ -435,25 +648,36 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup); * Traverses the bucket chain behind the provided hash value and calls the * specified compare function for each entry. * - * Lookups may occur in parallel with hash mutations as long as the lookup is - * guarded by rcu_read_lock(). The caller must take care of this. + * Lookups may occur in parallel with hashtable mutations and resizing. * * Returns the first entry on which the compare function returned true. */ -void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, +void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg) { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + const struct bucket_table *tbl, *old_tbl; struct rhash_head *he; u32 hash; + rcu_read_lock(); + + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = rht_dereference_rcu(ht->future_tbl, ht); hash = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl, hash) { +restart: + rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { if (!compare(rht_obj(ht, he), arg)) continue; + rcu_read_unlock(); return rht_obj(ht, he); } + if (unlikely(tbl != old_tbl)) { + tbl = old_tbl; + goto restart; + } + rcu_read_unlock(); + return NULL; } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); @@ -485,9 +709,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, - * #ifdef CONFIG_PROVE_LOCKING - * .mutex_is_held = &my_mutex_is_held, - * #endif * }; * * Configuration Example 2: Variable length keys @@ -507,9 +728,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .head_offset = offsetof(struct test_obj, node), * .hashfn = jhash, * .obj_hashfn = my_hash_fn, - * #ifdef CONFIG_PROVE_LOCKING - * .mutex_is_held = &my_mutex_is_held, - * #endif * }; */ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) @@ -529,18 +747,29 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) if (params->nelem_hint) size = rounded_hashtable_size(params); - tbl = bucket_table_alloc(size); + memset(ht, 0, sizeof(*ht)); + mutex_init(&ht->mutex); + memcpy(&ht->p, params, sizeof(*params)); + + if (params->locks_mul) + ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); + else + ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; + + tbl = bucket_table_alloc(ht, size); if (tbl == NULL) return -ENOMEM; - memset(ht, 0, sizeof(*ht)); ht->shift = ilog2(tbl->size); - memcpy(&ht->p, params, sizeof(*params)); RCU_INIT_POINTER(ht->tbl, tbl); + 
RCU_INIT_POINTER(ht->future_tbl, tbl); if (!ht->p.hash_rnd) get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); + if (ht->p.grow_decision || ht->p.shrink_decision) + INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker); + return 0; } EXPORT_SYMBOL_GPL(rhashtable_init); @@ -553,9 +782,16 @@ EXPORT_SYMBOL_GPL(rhashtable_init); * has to make sure that no resizing may happen by unpublishing the hashtable * and waiting for the quiescent cycle before releasing the bucket array. */ -void rhashtable_destroy(const struct rhashtable *ht) +void rhashtable_destroy(struct rhashtable *ht) { - bucket_table_free(ht->tbl); + ht->being_destroyed = true; + + mutex_lock(&ht->mutex); + + cancel_delayed_work(&ht->run_work); + bucket_table_free(rht_dereference(ht->tbl, ht)); + + mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_destroy); @@ -570,13 +806,6 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy); #define TEST_PTR ((void *) 0xdeadbeef) #define TEST_NEXPANDS 4 -#ifdef CONFIG_PROVE_LOCKING -static int test_mutex_is_held(void *parent) -{ - return 1; -} -#endif - struct test_obj { void *ptr; int value; @@ -646,10 +875,10 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) i, tbl->buckets[i], cnt); } - pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", - total, ht->nelems, TEST_ENTRIES); + pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", + total, atomic_read(&ht->nelems), TEST_ENTRIES); - if (total != ht->nelems || total != TEST_ENTRIES) + if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) pr_warn("Test failed: Total count mismatch ^^^"); } @@ -688,7 +917,9 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table expansion iteration %u...\n", i); + mutex_lock(&ht->mutex); rhashtable_expand(ht); + mutex_unlock(&ht->mutex); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -698,7 +929,9 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table shrinkage iteration %u...\n", i); + mutex_lock(&ht->mutex); rhashtable_shrink(ht); + mutex_unlock(&ht->mutex); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -741,9 +974,6 @@ static int __init test_rht_init(void) .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), .hashfn = jhash, -#ifdef CONFIG_PROVE_LOCKING - .mutex_is_held = &test_mutex_is_held, -#endif .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, }; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 7f903cf9a1b9..75887d7d2c6a 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -33,7 +33,7 @@ static bool nft_hash_lookup(const struct nft_set *set, const struct nft_data *key, struct nft_data *data) { - const struct rhashtable *priv = nft_set_priv(set); + struct rhashtable *priv = nft_set_priv(set); const struct nft_hash_elem *he; he = rhashtable_lookup(priv, key); @@ -113,7 +113,7 @@ static bool nft_hash_compare(void *ptr, void *arg) static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem) { - const struct rhashtable *priv = nft_set_priv(set); + struct rhashtable *priv = nft_set_priv(set); struct nft_compare_arg arg = { .set = set, .elem = elem, @@ -129,7 +129,7 @@ static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem) static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, struct nft_set_iter *iter) { - const struct rhashtable *priv = nft_set_priv(set); + struct 
rhashtable *priv = nft_set_priv(set); const struct bucket_table *tbl; const struct nft_hash_elem *he; struct nft_set_elem elem; @@ -162,13 +162,6 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[]) return sizeof(struct rhashtable); } -#ifdef CONFIG_PROVE_LOCKING -static int lockdep_nfnl_lock_is_held(void *parent) -{ - return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES); -} -#endif - static int nft_hash_init(const struct nft_set *set, const struct nft_set_desc *desc, const struct nlattr * const tb[]) @@ -182,9 +175,6 @@ static int nft_hash_init(const struct nft_set *set, .hashfn = jhash, .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, -#ifdef CONFIG_PROVE_LOCKING - .mutex_is_held = lockdep_nfnl_lock_is_held, -#endif }; return rhashtable_init(priv, ¶ms); @@ -192,16 +182,23 @@ static int nft_hash_init(const struct nft_set *set, static void nft_hash_destroy(const struct nft_set *set) { - const struct rhashtable *priv = nft_set_priv(set); - const struct bucket_table *tbl = priv->tbl; + struct rhashtable *priv = nft_set_priv(set); + const struct bucket_table *tbl; struct nft_hash_elem *he; struct rhash_head *pos, *next; unsigned int i; + /* Stop an eventual async resizing */ + priv->being_destroyed = true; + mutex_lock(&priv->mutex); + + tbl = rht_dereference(priv->tbl, priv); for (i = 0; i < tbl->size; i++) { rht_for_each_entry_safe(he, pos, next, tbl, i, node) nft_hash_elem_destroy(set, he); } + mutex_unlock(&priv->mutex); + rhashtable_destroy(priv); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 57449b6089c2..738c3bfaa564 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -114,15 +114,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0); DEFINE_MUTEX(nl_sk_hash_lock); EXPORT_SYMBOL_GPL(nl_sk_hash_lock); -#ifdef CONFIG_PROVE_LOCKING -static int lockdep_nl_sk_hash_is_held(void *parent) -{ - if (debug_locks) - return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); - return 1; -} -#endif - static ATOMIC_NOTIFIER_HEAD(netlink_chain); static DEFINE_SPINLOCK(netlink_tap_lock); @@ -1063,7 +1054,8 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid) goto err; err = -ENOMEM; - if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX)) + if (BITS_PER_LONG > 32 && + unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX)) goto err; nlk_sk(sk)->portid = portid; @@ -3122,9 +3114,6 @@ static int __init netlink_proto_init(void) .max_shift = 16, /* 64K */ .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, -#ifdef CONFIG_PROVE_LOCKING - .mutex_is_held = lockdep_nl_sk_hash_is_held, -#endif }; if (err != 0) -- cgit v1.2.3 From f89bd6f87a53ce5a7d60662429591ebac2745c10 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:21 +0100 Subject: rhashtable: Supports for nulls marker In order to allow for wider usage of rhashtable, use a special nulls marker to terminate each chain. The reason for not using the existing nulls_list is that the prev pointer usage would not be valid as entries can be linked in two different buckets at the same time. The 4 nulls base bits can be set through the rhashtable_params structure like this: struct rhashtable_params params = { [...] .nulls_base = (1U << RHT_BASE_SHIFT), }; This reduces the hash length from 32 bits to 27 bits. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/linux/list_nulls.h | 3 ++- include/linux/rhashtable.h | 57 ++++++++++++++++++++++++++++++++++++++-------- lib/rhashtable.c | 37 ++++++++++++++++++++++++------ 3 files changed, 79 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index 5d10ae364b5e..e8c300e06438 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -21,8 +21,9 @@ struct hlist_nulls_head { struct hlist_nulls_node { struct hlist_nulls_node *next, **pprev; }; +#define NULLS_MARKER(value) (1UL | (((long)value) << 1)) #define INIT_HLIST_NULLS_HEAD(ptr, nulls) \ - ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1))) + ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) /** diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index a1688f0a6193..de7cac753b09 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -18,15 +18,32 @@ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H -#include +#include #include +/* + * The end of the chain is marked with a special nulls marks which has + * the following format: + * + * +-------+-----------------------------------------------------+-+ + * | Base | Hash |1| + * +-------+-----------------------------------------------------+-+ + * + * Base (4 bits) : Reserved to distinguish between multiple tables. + * Specified via &struct rhashtable_params.nulls_base. + * Hash (27 bits): Full hash (unmasked) of first element added to bucket + * 1 (1 bit) : Nulls marker (always set) + * + * The remaining bits of the next pointer remain unused for now. + */ +#define RHT_BASE_BITS 4 +#define RHT_HASH_BITS 27 +#define RHT_BASE_SHIFT RHT_HASH_BITS + struct rhash_head { struct rhash_head __rcu *next; }; -#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) - /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets @@ -55,6 +72,7 @@ struct rhashtable; * @hash_rnd: Seed to use while hashing * @max_shift: Maximum number of shifts while expanding * @min_shift: Minimum number of shifts while shrinking + * @nulls_base: Base value to generate nulls marker * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @hashfn: Function to hash key * @obj_hashfn: Function to hash object @@ -69,6 +87,7 @@ struct rhashtable_params { u32 hash_rnd; size_t max_shift; size_t min_shift; + u32 nulls_base; size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; @@ -100,6 +119,24 @@ struct rhashtable { bool being_destroyed; }; +static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) +{ + return NULLS_MARKER(ht->p.nulls_base + hash); +} + +#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ + ((ptr) = (typeof(ptr)) rht_marker(ht, hash)) + +static inline bool rht_is_a_nulls(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr & 1); +} + +static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr) >> 1; +} + #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); @@ -157,7 +194,7 @@ void rhashtable_destroy(struct rhashtable *ht); */ #define rht_for_each_continue(pos, head, tbl, hash) \ for (pos = rht_dereference_bucket(head, tbl, hash); \ - pos; \ + !rht_is_a_nulls(pos); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** @@ -180,7 +217,7 
@@ void rhashtable_destroy(struct rhashtable *ht); */ #define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ for (pos = rht_dereference_bucket(head, tbl, hash); \ - pos && rht_entry(tpos, pos, member); \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** @@ -209,9 +246,9 @@ void rhashtable_destroy(struct rhashtable *ht); */ #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ - next = pos ? rht_dereference_bucket(pos->next, tbl, hash) \ - : NULL; \ - pos && rht_entry(tpos, pos, member); \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = next) /** @@ -228,7 +265,7 @@ void rhashtable_destroy(struct rhashtable *ht); #define rht_for_each_rcu_continue(pos, head, tbl, hash) \ for (({barrier(); }), \ pos = rht_dereference_bucket_rcu(head, tbl, hash); \ - pos; \ + !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) /** @@ -260,7 +297,7 @@ void rhashtable_destroy(struct rhashtable *ht); #define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ for (({barrier(); }), \ pos = rht_dereference_bucket_rcu(head, tbl, hash); \ - pos && rht_entry(tpos, pos, member); \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) /** diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 312e3437c7bc..cbad192d3b3d 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -28,6 +28,9 @@ #define HASH_MIN_SIZE 4UL #define BUCKET_LOCKS_PER_CPU 128UL +/* Base bits plus 1 bit for nulls marker */ +#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) + enum { RHT_LOCK_NORMAL, RHT_LOCK_NESTED, @@ -86,7 +89,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, ht->p.hash_rnd); - return hash; + return hash >> HASH_RESERVED_SPACE; } static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) @@ -95,6 +98,7 @@ static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) u32 hash; hash = ht->p.hashfn(key, len, ht->p.hash_rnd); + hash >>= HASH_RESERVED_SPACE; return rht_bucket_index(tbl, hash); } @@ -111,7 +115,7 @@ static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) struct rhash_head __rcu **pprev; for (pprev = &tbl->buckets[n]; - rht_dereference_bucket(*pprev, tbl, n); + !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) ; @@ -164,6 +168,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, { struct bucket_table *tbl; size_t size; + int i; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); @@ -180,6 +185,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, return NULL; } + for (i = 0; i < nbuckets; i++) + INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + return tbl; } @@ -221,7 +229,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Old bucket empty, no work needed. 
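 To make the nulls encoding introduced by this patch concrete, a hedged sketch of what a chain terminator looks like once a table is initialized (check_nulls() is a hypothetical helper; the accessors are the ones added above):

	static void check_nulls(struct rhashtable *ht, u32 hash)
	{
		struct rhash_head *end;

		/* end = ((nulls_base + hash) << 1) | 1, never a real node */
		INIT_RHT_NULLS_HEAD(end, ht, hash);

		BUG_ON(!rht_is_a_nulls(end));	/* low bit marks the terminator */
		BUG_ON(rht_get_nulls_value(end) != ht->p.nulls_base + hash);
	}
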
*/ p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, old_hash); - if (!p) + if (rht_is_a_nulls(p)) return; new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); @@ -252,8 +260,8 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Find the subsequent node which does hash to the same * bucket as node P, or NULL if no such node exists. */ - next = NULL; - if (he) { + INIT_RHT_NULLS_HEAD(next, ht, old_hash); + if (!rht_is_a_nulls(he)) { rht_for_each_continue(he, he->next, old_tbl, old_hash) { if (head_hashfn(ht, new_tbl, he) == new_hash) { next = he; @@ -369,11 +377,15 @@ int rhashtable_expand(struct rhashtable *ht) */ complete = true; for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + struct rhash_head *head; + old_bucket_lock = bucket_lock(old_tbl, old_hash); spin_lock_bh(old_bucket_lock); hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); - if (old_tbl->buckets[old_hash] != NULL) + head = rht_dereference_bucket(old_tbl->buckets[old_hash], + old_tbl, old_hash); + if (!rht_is_a_nulls(head)) complete = false; spin_unlock_bh(old_bucket_lock); @@ -498,6 +510,7 @@ static void rht_deferred_worker(struct work_struct *work) void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl; + struct rhash_head *head; spinlock_t *lock; unsigned hash; @@ -508,7 +521,12 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) lock = bucket_lock(tbl, hash); spin_lock_bh(lock); - RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + if (rht_is_a_nulls(head)) + INIT_RHT_NULLS_HEAD(obj->next, ht, hash); + else + RCU_INIT_POINTER(obj->next, head); + rcu_assign_pointer(tbl->buckets[hash], obj); spin_unlock_bh(lock); @@ -709,6 +727,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, + * .nulls_base = (1U << RHT_BASE_SHIFT), * }; * * Configuration Example 2: Variable length keys @@ -741,6 +760,9 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) (!params->key_len && !params->obj_hashfn)) return -EINVAL; + if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) + return -EINVAL; + params->min_shift = max_t(size_t, params->min_shift, ilog2(HASH_MIN_SIZE)); @@ -974,6 +996,7 @@ static int __init test_rht_init(void) .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), .hashfn = jhash, + .nulls_base = (3U << RHT_BASE_SHIFT), .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, }; -- cgit v1.2.3 From 86b35b64ed7b6b38305dee67a0f2ddff2ca5455d Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Sun, 4 Jan 2015 15:25:09 +0800 Subject: rhashtable: fix missing header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixup below build error: include/linux/rhashtable.h: At top level: include/linux/rhashtable.h:118:34: error: field ‘mutex’ has incomplete type Signed-off-by: Ying Xue Acked-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/linux/rhashtable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index de7cac753b09..de1459c74c4d 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -20,6 +20,7 @@ #include #include +#include /* * The end of the chain is marked with a special nulls marks which has -- cgit v1.2.3 From a3449ded128d037e6b2bd7a59b0741de56506066 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Sun, 4 Jan 2015 15:24:35 +0800 Subject: list_nulls: fix missing header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixup below build error: include/linux/list_nulls.h: In function ‘hlist_nulls_del’: include/linux/list_nulls.h:84:13: error: ‘LIST_POISON2’ undeclared (first use in this function) Signed-off-by: Ying Xue Signed-off-by: David S. Miller --- include/linux/list_nulls.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index e8c300e06438..f266661d2666 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -1,6 +1,9 @@ #ifndef _LINUX_LIST_NULLS_H #define _LINUX_LIST_NULLS_H +#include +#include + /* * Special version of lists, where end of list is not a NULL pointer, * but a 'nulls' marker, which can have many different values. -- cgit v1.2.3 From 224d019c4fbba242041e9b25a926ba873b7da1e2 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 5 Jan 2015 13:56:14 -0800 Subject: ip: Move checksum convert defines to inet Move convert_csum from udp_sock to inet_sock. This allows checksum conversion to be used for different types of sockets and also allows it to be enabled from the inet layer (which we'll want to do when enabling the IP_CHECKSUM cmsg). Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/linux/udp.h | 16 +--------------- include/net/inet_sock.h | 17 +++++++++++++++++ net/ipv4/fou.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv4/udp_tunnel.c | 2 +- net/ipv6/udp.c | 2 +- 6 files changed, 22 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/udp.h b/include/linux/udp.h index ee3277593222..247cfdcc4b08 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,11 +49,7 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ - convert_csum:1;/* On receive, convert checksum - * unnecessary to checksum complete - * if possible. - */ + no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ /* * Following member retains the information to create a UDP header * when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } -static inline void udp_set_convert_csum(struct sock *sk, bool val) -{ - udp_sk(sk)->convert_csum = val; -} - -static inline bool udp_get_convert_csum(struct sock *sk) -{ - return udp_sk(sk)->convert_csum; -} - #define udp_portaddr_for_each_entry(__sk, node, list) \ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index a829b77523cf..360b110b3e36 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -184,6 +184,7 @@ struct inet_sock { mc_all:1, nodefrag:1; __u8 rcv_tos; + __u8 convert_csum; int uc_index; int mc_index; __be32 mc_addr; @@ -250,4 +251,20 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) return flags; } +static inline void inet_inc_convert_csum(struct sock *sk) +{ + inet_sk(sk)->convert_csum++; +} + +static inline void inet_dec_convert_csum(struct sock *sk) +{ + if (inet_sk(sk)->convert_csum > 0) + inet_sk(sk)->convert_csum--; +} + +static inline bool inet_get_convert_csum(struct sock *sk) +{ + return !!inet_sk(sk)->convert_csum; +} + #endif /* _INET_SOCK_H */ diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index b986298a7ba3..2197c36f722f 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -490,7 +490,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg, sk->sk_user_data = fou; fou->sock = sock; - udp_set_convert_csum(sk, true); + inet_inc_convert_csum(sk); sk->sk_allocation = GFP_ATOMIC; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 13b4dcf86ef6..53358d88f110 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1806,7 +1806,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (sk != NULL) { int ret; - if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk)) + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, inet_compute_pseudo); diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 1671263e5fa0..9996e63ed304 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -63,7 +63,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, inet_sk(sk)->mc_loop = 0; /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ - udp_set_convert_csum(sk, true); + inet_inc_convert_csum(sk); rcu_assign_sk_user_data(sk, cfg->sk_user_data); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 189dc4ae3eca..e41f017cd479 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -909,7 +909,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; } - if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk)) + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, ip6_compute_pseudo); -- cgit v1.2.3 From 8b3a38daff6f50027039d6979b9eb026907508eb Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 23 Dec 2014 19:04:23 +0100 Subject: brcmfmac: Add support for bcm43340/1 wireless chipsets This patch adds support for the bcm43340 and bcm43341 wireless chipsets. These two chipsets are identical from the wireless part's perspective. As such they use the same firmware image.
Cc: Samuel Ortiz Cc: Rob Herring Signed-off-by: John Stultz [arend@broadcom.com: squash to single commit, remove 43341 chipid] Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 2 ++ drivers/net/wireless/brcm80211/brcmfmac/chip.c | 1 + drivers/net/wireless/brcm80211/brcmfmac/sdio.c | 5 +++++ drivers/net/wireless/brcm80211/include/brcm_hw_ids.h | 3 +++ include/linux/mmc/sdio_ids.h | 6 ++++-- 5 files changed, 15 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 9880dae2a569..dffd9e44f5b6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -1005,6 +1005,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(BRCM_SDIO_4329_DEVICE_ID), BRCMF_SDIO_DEVICE(BRCM_SDIO_4330_DEVICE_ID), BRCMF_SDIO_DEVICE(BRCM_SDIO_4334_DEVICE_ID), + BRCMF_SDIO_DEVICE(BRCM_SDIO_43340_DEVICE_ID), + BRCMF_SDIO_DEVICE(BRCM_SDIO_43341_DEVICE_ID), BRCMF_SDIO_DEVICE(BRCM_SDIO_43362_DEVICE_ID), BRCMF_SDIO_DEVICE(BRCM_SDIO_4335_4339_DEVICE_ID), BRCMF_SDIO_DEVICE(BRCM_SDIO_4354_DEVICE_ID), diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c index 519b79ebaabd..04d2ca0d87d6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c @@ -481,6 +481,7 @@ static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci) ci->pub.ramsize = 0x48000; break; case BRCM_CC_4334_CHIP_ID: + case BRCM_CC_43340_CHIP_ID: ci->pub.ramsize = 0x80000; break; case BRCM_CC_4335_CHIP_ID: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c index 0b0d51a61060..551da356a5bd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c @@ -608,6 +608,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = { #define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt" #define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin" #define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt" +#define BCM43340_FIRMWARE_NAME "brcm/brcmfmac43340-sdio.bin" +#define BCM43340_NVRAM_NAME "brcm/brcmfmac43340-sdio.txt" #define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin" #define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt" #define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin" @@ -629,6 +631,8 @@ MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME); MODULE_FIRMWARE(BCM4330_NVRAM_NAME); MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME); MODULE_FIRMWARE(BCM4334_NVRAM_NAME); +MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME); +MODULE_FIRMWARE(BCM43340_NVRAM_NAME); MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME); MODULE_FIRMWARE(BCM4335_NVRAM_NAME); MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME); @@ -660,6 +664,7 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = { { BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) }, { BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) }, { BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) }, + { BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) }, { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }, { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) }, { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) 
}, diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h index 6996fcc144cf..00215efbc13b 100644 --- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h @@ -34,6 +34,7 @@ #define BRCM_CC_4329_CHIP_ID 0x4329 #define BRCM_CC_4330_CHIP_ID 0x4330 #define BRCM_CC_4334_CHIP_ID 0x4334 +#define BRCM_CC_43340_CHIP_ID 43340 #define BRCM_CC_43362_CHIP_ID 43362 #define BRCM_CC_4335_CHIP_ID 0x4335 #define BRCM_CC_4339_CHIP_ID 0x4339 @@ -51,6 +52,8 @@ #define BRCM_SDIO_4329_DEVICE_ID BRCM_CC_4329_CHIP_ID #define BRCM_SDIO_4330_DEVICE_ID BRCM_CC_4330_CHIP_ID #define BRCM_SDIO_4334_DEVICE_ID BRCM_CC_4334_CHIP_ID +#define BRCM_SDIO_43340_DEVICE_ID BRCM_CC_43340_CHIP_ID +#define BRCM_SDIO_43341_DEVICE_ID 43341 #define BRCM_SDIO_43362_DEVICE_ID BRCM_CC_43362_CHIP_ID #define BRCM_SDIO_4335_4339_DEVICE_ID BRCM_CC_4335_CHIP_ID #define BRCM_SDIO_4354_DEVICE_ID BRCM_CC_4354_CHIP_ID diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 0f01fe065424..996807963716 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -24,13 +24,15 @@ * Vendors and devices. Sort key: vendor first, device next. */ #define SDIO_VENDOR_ID_BROADCOM 0x02d0 -#define SDIO_DEVICE_ID_BROADCOM_43143 43143 +#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 #define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c +#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 -#define SDIO_DEVICE_ID_BROADCOM_43362 43362 +#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_VENDOR_ID_INTEL 0x0089 -- cgit v1.2.3 From 2f4383667d57d1c719070db86b14277277752841 Mon Sep 17 00:00:00 2001 From: Ed Swierk Date: Fri, 2 Jan 2015 17:27:56 -0800 Subject: ethtool: Extend ethtool plugin module eeprom API to phylib This patch extends the ethtool plugin module eeprom API to support cards whose phy support is delegated to a separate driver. The handlers for ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPROM call the module_info and module_eeprom functions if the phy driver provides them; otherwise the handlers call the equivalent ethtool_ops functions provided by network drivers with built-in phy support. Signed-off-by: Ed Swierk Signed-off-by: David S. Miller
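[Editor's note: the sketch below is an illustration of the new hooks, not part of the patch. A phylib driver that fills in module_info/module_eeprom now services ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPROM itself; example_read_sfp_byte() is a hypothetical bus accessor, and the driver name is made up.]

/* Editor's sketch: a hypothetical phy driver exposing its plug-in
 * module's eeprom through the new struct phy_driver hooks.
 */
static u8 example_read_sfp_byte(struct phy_device *dev, u32 off); /* hypothetical */

static int example_module_info(struct phy_device *dev,
			       struct ethtool_modinfo *modinfo)
{
	/* Report a plain SFF-8079 eeprom layout */
	modinfo->type = ETH_MODULE_SFF_8079;
	modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	return 0;
}

static int example_module_eeprom(struct phy_device *dev,
				 struct ethtool_eeprom *ee, u8 *data)
{
	u32 i;

	/* Copy the requested window of the module eeprom */
	for (i = 0; i < ee->len; i++)
		data[i] = example_read_sfp_byte(dev, ee->offset + i);
	return 0;
}

static struct phy_driver example_phy_driver = {
	/* ...the usual phy_driver fields... */
	.module_info	= example_module_info,
	.module_eeprom	= example_module_eeprom,
};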
--- include/linux/phy.h | 9 +++++++++ net/core/ethtool.c | 45 ++++++++++++++++++++++++++++++++++----------- 2 files changed, 43 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 22af8f8f5802..9c189a1fa3a2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -565,6 +565,15 @@ struct phy_driver { void (*write_mmd_indirect)(struct phy_device *dev, int ptrad, int devnum, int regnum, u32 val); + /* Get the size and type of the eeprom contained within a plug-in + * module */ + int (*module_info)(struct phy_device *dev, + struct ethtool_modinfo *modinfo); + + /* Get the eeprom information from the plug-in module */ + int (*module_eeprom)(struct phy_device *dev, + struct ethtool_eeprom *ee, u8 *data); + struct device_driver driver; }; #define to_phy_driver(d) container_of(d, struct phy_driver, driver) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 550892cd6b3f..91f74f3eb204 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1597,20 +1597,31 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) return err; } +static int __ethtool_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + if (phydev && phydev->drv && phydev->drv->module_info) + return phydev->drv->module_info(phydev, modinfo); + + if (ops->get_module_info) + return ops->get_module_info(dev, modinfo); + + return -EOPNOTSUPP; +} + static int ethtool_get_module_info(struct net_device *dev, void __user *useraddr) { int ret; struct ethtool_modinfo modinfo; - const struct ethtool_ops *ops = dev->ethtool_ops; - - if (!ops->get_module_info) - return -EOPNOTSUPP; if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) return -EFAULT; - ret = ops->get_module_info(dev, &modinfo); + ret = __ethtool_get_module_info(dev, &modinfo); if (ret) return ret; @@ -1620,21 +1631,33 @@ static int ethtool_get_module_info(struct net_device *dev, return 0; } +static int __ethtool_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + if (phydev && phydev->drv && phydev->drv->module_eeprom) + return phydev->drv->module_eeprom(phydev, ee, data); + + if (ops->get_module_eeprom) + return ops->get_module_eeprom(dev, ee, data); + + return -EOPNOTSUPP; +} + static int ethtool_get_module_eeprom(struct net_device *dev, void __user *useraddr) { int ret; struct ethtool_modinfo modinfo; - const struct ethtool_ops *ops = dev->ethtool_ops; - - if (!ops->get_module_info || !ops->get_module_eeprom) - return -EOPNOTSUPP; - ret = ops->get_module_info(dev, &modinfo); + ret = __ethtool_get_module_info(dev, &modinfo); if (ret) return ret; - return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom, + return ethtool_get_any_eeprom(dev, useraddr, + __ethtool_get_module_eeprom, modinfo.eeprom_len); } -- cgit v1.2.3 From db30485408326a6f466a843b291b23535f63eda0 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:54 +0800 Subject: rhashtable: involve rhashtable_lookup_insert routine Introduce a new function called rhashtable_lookup_insert() which makes lookup and insertion atomic under bucket lock protection, helping us avoid introducing an extra lock when we search for and insert an object into the hash table.
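[Editor's note: a minimal usage sketch, not from the patch. struct example_obj is a hypothetical keyed object, similar to the one in the rhashtable self-test; the helper name is made up.]

struct example_obj {
	int			value;	/* the key, at key_offset */
	struct rhash_head	node;
};

/* Lookup and insert happen atomically under the bucket lock, so the
 * caller needs no extra lock of its own to guarantee uniqueness.
 */
static int example_insert_unique(struct rhashtable *ht,
				 struct example_obj *obj)
{
	if (!rhashtable_lookup_insert(ht, &obj->node))
		return -EEXIST;	/* an object with the same key exists */
	return 0;
}

(The rhashtable_lookup_compare_insert() variant added a few commits later follows the same pattern with a caller-supplied compare function.)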
Signed-off-by: Ying Xue Signed-off-by: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 1 + lib/rhashtable.c | 97 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 83 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index de1459c74c4d..73c913f31574 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -168,6 +168,7 @@ int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg); +bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj); void rhashtable_destroy(struct rhashtable *ht); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 20006854fce0..4430233c4e11 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -505,8 +505,26 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht) schedule_delayed_work(&ht->run_work, 0); } +static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, + struct bucket_table *tbl, u32 hash) +{ + struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash], + tbl, hash); + + if (rht_is_a_nulls(head)) + INIT_RHT_NULLS_HEAD(obj->next, ht, hash); + else + RCU_INIT_POINTER(obj->next, head); + + rcu_assign_pointer(tbl->buckets[hash], obj); + + atomic_inc(&ht->nelems); + + rhashtable_wakeup_worker(ht); +} + /** - * rhashtable_insert - insert object into hash hash table + * rhashtable_insert - insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @@ -523,7 +541,6 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht) void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl; - struct rhash_head *head; spinlock_t *lock; unsigned hash; @@ -534,19 +551,9 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) lock = bucket_lock(tbl, hash); spin_lock_bh(lock); - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); - if (rht_is_a_nulls(head)) - INIT_RHT_NULLS_HEAD(obj->next, ht, hash); - else - RCU_INIT_POINTER(obj->next, head); - - rcu_assign_pointer(tbl->buckets[hash], obj); + __rhashtable_insert(ht, obj, tbl, hash); spin_unlock_bh(lock); - atomic_inc(&ht->nelems); - - rhashtable_wakeup_worker(ht); - rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_insert); @@ -560,7 +567,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); * walk the bucket chain upon removal. The removal operation is thus * considerable slow if the hash table is not correctly sized. * - * Will automatically shrink the table via rhashtable_expand() if the the + * Will automatically shrink the table via rhashtable_expand() if the * shrink_decision function specified at rhashtable_init() returns true. * * The caller must ensure that no concurrent table mutations occur. It is @@ -641,7 +648,7 @@ static bool rhashtable_compare(void *ptr, void *arg) * for a entry with an identical key. The first matching entry is returned. * * This lookup function may only be used for fixed key hash table (key_len - * paramter set). It will BUG() if used inappropriately. + * parameter set). It will BUG() if used inappropriately. * * Lookups may occur in parallel with hashtable mutations and resizing. 
*/ @@ -702,6 +709,66 @@ restart: } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); +/** + * rhashtable_lookup_insert - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for fixed key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) +{ + struct bucket_table *new_tbl, *old_tbl; + spinlock_t *new_bucket_lock, *old_bucket_lock; + u32 new_hash, old_hash; + bool success = true; + + BUG_ON(!ht->p.key_len); + + rcu_read_lock(); + + old_tbl = rht_dereference_rcu(ht->tbl, ht); + old_hash = head_hashfn(ht, old_tbl, obj); + old_bucket_lock = bucket_lock(old_tbl, old_hash); + spin_lock_bh(old_bucket_lock); + + new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + new_hash = head_hashfn(ht, new_tbl, obj); + new_bucket_lock = bucket_lock(new_tbl, new_hash); + if (unlikely(old_tbl != new_tbl)) + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + + if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) { + success = false; + goto exit; + } + + __rhashtable_insert(ht, obj, new_tbl, new_hash); + +exit: + if (unlikely(old_tbl != new_tbl)) + spin_unlock_bh(new_bucket_lock); + spin_unlock_bh(old_bucket_lock); + + rcu_read_unlock(); + + return success; +} +EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); + static size_t rounded_hashtable_size(struct rhashtable_params *params) { return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), -- cgit v1.2.3 From c0c09bfdc4150b3918526660768585cd477adf35 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:56 +0800 Subject: rhashtable: avoid unnecessary wakeup for worker queue Move the checks that verify whether the hash table size exceeds its maximum threshold or falls below its minimum threshold from the resizing functions into the resizing decision functions, avoiding unnecessary wakeups of the worker queue thread. Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller
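[Editor's note: one consequence worth spelling out is that a custom grow or shrink decision now has to enforce the shift bounds itself, since rhashtable_expand()/rhashtable_shrink() no longer check them. A hedged sketch of a hypothetical custom grow policy, mirroring the bound check rht_grow_above_75() uses after this patch:]

/* Hypothetical policy (editor's sketch): expand at 50% load, bounded
 * by max_shift exactly as rht_grow_above_75() is after this change.
 */
static bool example_grow_above_50(const struct rhashtable *ht,
				  size_t new_size)
{
	return atomic_read(&ht->nelems) > (new_size / 2) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}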
--- include/linux/rhashtable.h | 2 +- lib/rhashtable.c | 18 +++++----------- 2 files changed, 8 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 73c913f31574..326acd8c2e9f 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -113,7 +113,7 @@ struct rhashtable { struct bucket_table __rcu *tbl; struct bucket_table __rcu *future_tbl; atomic_t nelems; - size_t shift; + atomic_t shift; struct rhashtable_params p; struct delayed_work run_work; struct mutex mutex; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 1aef942976fe..7fb474b18f1b 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -199,7 +199,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ - return atomic_read(&ht->nelems) > (new_size / 4 * 3); + return atomic_read(&ht->nelems) > (new_size / 4 * 3) && + (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); } EXPORT_SYMBOL_GPL(rht_grow_above_75); @@ -211,7 +212,8 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) { /* Shrink table beneath 30% load */ - return atomic_read(&ht->nelems) < (new_size * 3 / 10); + return atomic_read(&ht->nelems) < (new_size * 3 / 10) && + (atomic_read(&ht->shift) > ht->p.min_shift); } EXPORT_SYMBOL_GPL(rht_shrink_below_30); @@ -318,14 +320,11 @@ int rhashtable_expand(struct rhashtable *ht) ASSERT_RHT_MUTEX(ht); - if (ht->p.max_shift && ht->shift >= ht->p.max_shift) - return 0; - new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; - ht->shift++; + atomic_inc(&ht->shift); /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. @@ -421,9 +420,6 @@ int rhashtable_shrink(struct rhashtable *ht) ASSERT_RHT_MUTEX(ht); - if (ht->shift <= ht->p.min_shift) - return 0; - new_tbl = bucket_table_alloc(ht, tbl->size / 2); if (new_tbl == NULL) return -ENOMEM; @@ -462,7 +458,7 @@ int rhashtable_shrink(struct rhashtable *ht) /* Publish the new, valid hash table */ rcu_assign_pointer(ht->tbl, new_tbl); - ht->shift--; + atomic_dec(&ht->shift); /* Wait for readers. No new readers will have references to the * old hash table. @@ -851,7 +847,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) if (tbl == NULL) return -ENOMEM; - ht->shift = ilog2(tbl->size); + atomic_set(&ht->shift, ilog2(tbl->size)); RCU_INIT_POINTER(ht->tbl, tbl); RCU_INIT_POINTER(ht->future_tbl, tbl); -- cgit v1.2.3 From 7a868d1e9ab3c534c5ad44e3e5dc46753a1e5636 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Mon, 12 Jan 2015 14:52:22 +0800 Subject: rhashtable: involve rhashtable_lookup_compare_insert routine Introduce a new function called rhashtable_lookup_compare_insert() which is very similar to rhashtable_lookup_insert(). But the former makes use of a user-supplied compare function to look for an object, and then inserts it into the hash table if not found. As the entire process of search and insertion is under the protection of the per-bucket lock, this helps users avoid taking an extra lock. Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S.
Miller --- include/linux/rhashtable.h | 5 +++++ lib/rhashtable.c | 42 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 326acd8c2e9f..7b9bd77ed684 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -168,7 +168,12 @@ int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg); + bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj); +bool rhashtable_lookup_compare_insert(struct rhashtable *ht, + struct rhash_head *obj, + bool (*compare)(void *, void *), + void *arg); void rhashtable_destroy(struct rhashtable *ht); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 8023b554905c..ed6ae1ad304c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -726,6 +726,43 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); * to rhashtable_init(). */ bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = rht_obj(ht, obj) + ht->p.key_offset, + }; + + BUG_ON(!ht->p.key_len); + + return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare, + &arg); +} +EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); + +/** + * rhashtable_lookup_compare_insert - search and insert object to hash table + * with compare function + * @ht: hash table + * @obj: pointer to hash head inside object + * @compare: compare function, must return true on match + * @arg: argument passed on to compare function + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * Lookups may occur in parallel with hashtable mutations and resizing. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). 
+ */ +bool rhashtable_lookup_compare_insert(struct rhashtable *ht, + struct rhash_head *obj, + bool (*compare)(void *, void *), + void *arg) { struct bucket_table *new_tbl, *old_tbl; spinlock_t *new_bucket_lock, *old_bucket_lock; @@ -747,7 +784,8 @@ bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) if (unlikely(old_tbl != new_tbl)) spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); - if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) { + if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset, + compare, arg)) { success = false; goto exit; } @@ -763,7 +801,7 @@ exit: return success; } -EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); +EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert); static size_t rounded_hashtable_size(struct rhashtable_params *params) { -- cgit v1.2.3 From 6f73d3b13dc5e16ae06025cd1b12a36b2857caa2 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Mon, 12 Jan 2015 14:52:24 +0800 Subject: rhashtable: add a note for grow and shrink decision functions As commit c0c09bfdc415 ("rhashtable: avoid unnecessary wakeup for worker queue") moved the checks that verify whether the hash table size exceeds its maximum threshold or falls below its minimum threshold from the resizing functions into the resizing decision functions, we should add a note in rhashtable.h indicating that implementations of the grow and shrink decision functions must enforce the min/max shift limits themselves; otherwise, the min/max shift watermarks they set will have no effect. Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 7b9bd77ed684..9570832ab07c 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -79,6 +79,10 @@ struct rhashtable; * @obj_hashfn: Function to hash object * @grow_decision: If defined, may return true if table should expand * @shrink_decision: If defined, may return true if table should shrink + * + * Note: when implementing the grow and shrink decision function, min/max + * shift must be enforced, otherwise, resizing watermarks they set may be + * useless. */ struct rhashtable_params { size_t nelem_hint; -- cgit v1.2.3 From df8a39defad46b83694ea6dd868d332976d62cc0 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 13 Jan 2015 17:13:44 +0100 Subject: net: rename vlan_tx_* helpers since "tx" is misleading there The same macros are used for rx as well. So rename them. Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- Documentation/networking/filter.txt | 4 ++-- drivers/infiniband/hw/nes/nes_nic.c | 13 +++++++------ drivers/net/ethernet/3com/typhoon.c | 4 ++-- drivers/net/ethernet/alteon/acenic.c | 8 ++++---- drivers/net/ethernet/amd/amd8111e.c | 4 ++-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 8 ++++---- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4 ++-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4 ++-- drivers/net/ethernet/atheros/atlx/atl1.c | 4 ++-- drivers/net/ethernet/atheros/atlx/atl2.c | 4 ++-- drivers/net/ethernet/broadcom/bnx2.c | 4 ++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4 ++-- drivers/net/ethernet/broadcom/tg3.c | 4 ++-- drivers/net/ethernet/brocade/bna/bnad.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb/sge.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 6 +++--- drivers/net/ethernet/chelsio/cxgb4/sge.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 4 ++-- drivers/net/ethernet/cisco/enic/enic_main.c | 4 ++-- drivers/net/ethernet/emulex/benet/be_main.c | 12 ++++++------ drivers/net/ethernet/freescale/gianfar.c | 4 ++-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 4 ++-- drivers/net/ethernet/intel/e1000/e1000_main.c | 5 +++-- drivers/net/ethernet/intel/e1000e/netdev.c | 9 +++++---- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4 ++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 4 ++-- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- drivers/net/ethernet/intel/igbvf/netdev.c | 5 +++-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4 ++-- drivers/net/ethernet/jme.c | 4 ++-- drivers/net/ethernet/marvell/sky2.c | 6 +++--- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12 ++++++------ drivers/net/ethernet/natsemi/ns83820.c | 4 ++-- drivers/net/ethernet/neterion/s2io.c | 4 ++-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 ++-- drivers/net/ethernet/nvidia/forcedeth.c | 4 ++-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4 ++-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 8 ++++---- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6 +++--- drivers/net/ethernet/realtek/8139cp.c | 4 ++-- drivers/net/ethernet/realtek/r8169.c | 4 ++-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2 +- drivers/net/ethernet/tehuti/tehuti.c | 4 ++-- drivers/net/ethernet/via/via-rhine.c | 6 +++--- drivers/net/ethernet/via/via-velocity.c | 4 ++-- drivers/net/macvtap.c | 6 +++--- drivers/net/tun.c | 4 ++-- drivers/net/usb/r8152.c | 4 ++-- drivers/net/vmxnet3/vmxnet3_drv.c | 4 ++-- drivers/net/vxlan.c | 4 ++-- drivers/s390/net/qeth_l3_main.c | 8 ++++---- drivers/vhost/net.c | 2 +- include/linux/if_vlan.h | 16 ++++++++-------- include/net/pkt_sched.h | 2 +- include/trace/events/net.h | 8 ++++---- net/8021q/vlan_core.c | 2 +- net/bridge/br_netfilter.c | 12 ++++++------ net/bridge/br_private.h | 4 ++-- net/bridge/br_vlan.c | 4 ++-- net/bridge/netfilter/ebt_vlan.c | 4 ++-- net/bridge/netfilter/ebtables.c | 2 +- net/core/dev.c | 10 +++++----- net/core/netpoll.c | 2 +- net/core/skbuff.c | 8 ++++---- net/ipv4/geneve.c | 2 +- net/openvswitch/actions.c | 4 ++-- net/openvswitch/datapath.c | 2 +- net/openvswitch/flow.c | 4 ++-- net/openvswitch/vport-gre.c | 2 +- net/openvswitch/vport.c | 3 ++- net/packet/af_packet.c | 12 ++++++------ net/sched/em_meta.c | 2 +- net/wireless/util.c | 4 ++-- 77 files changed, 195 insertions(+), 190 deletions(-) 
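[Editor's note: a sketch of the rename in a typical driver transmit path, not taken from the patch; EXAMPLE_TX_FLAG_VLAN is a made-up driver flag. The helpers' semantics are unchanged, only the "tx" prefix is dropped since the tag may also be set on receive.]

#define EXAMPLE_TX_FLAG_VLAN	0x10000	/* hypothetical driver flag */

static u32 example_tx_vlan_flags(const struct sk_buff *skb)
{
	u32 tx_flags = 0;

	/* Old spelling: vlan_tx_tag_present()/vlan_tx_tag_get() */
	if (skb_vlan_tag_present(skb))
		tx_flags = EXAMPLE_TX_FLAG_VLAN | skb_vlan_tag_get(skb);

	return tx_flags;
}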
(limited to 'include/linux') diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 58d08f8d8d80..9930ecfbb465 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -279,8 +279,8 @@ Possible BPF extensions are shown in the following table: hatype skb->dev->type rxhash skb->hash cpu raw_smp_processor_id() - vlan_tci vlan_tx_tag_get(skb) - vlan_pr vlan_tx_tag_present(skb) + vlan_tci skb_vlan_tag_get(skb) + vlan_pr skb_vlan_tag_present(skb) rand prandom_u32() These extensions can also be prefixed with '#'. diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 49eb5111d2cd..70acda91eb2a 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* setup the VLAN tag if present */ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", - netdev->name, vlan_tx_tag_get(skb)); + netdev->name, skb_vlan_tag_get(skb)); wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; - wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); + wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb); } else wqe_misc = 0; @@ -576,11 +576,12 @@ tso_sq_no_longer_full: wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* setup the VLAN tag if present */ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", - netdev->name, vlan_tx_tag_get(skb) ); + netdev->name, + skb_vlan_tag_get(skb)); wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; - wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); + wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb); } else wqe_misc = 0; diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index dede43f4ce09..8f8418d2ac4a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM; } - if(vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { first_txd->processFlags |= TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; first_txd->processFlags |= - cpu_to_le32(htons(vlan_tx_tag_get(skb)) << + cpu_to_le32(htons(skb_vlan_tag_get(skb)) << TYPHOON_TX_PF_VLAN_TAG_SHIFT); } diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index b68074803de3..b90a26b13fdf 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2429,9 +2429,9 @@ restart: flagsize = (skb->len << 16) | (BD_FLG_END); if (skb->ip_summed == CHECKSUM_PARTIAL) flagsize |= BD_FLG_TCP_UDP_SUM; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { flagsize |= BD_FLG_VLAN_TAG; - vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = skb_vlan_tag_get(skb); } desc = ap->tx_ring + idx; idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); @@ -2450,9 +2450,9 @@ restart: flagsize = (skb_headlen(skb) << 16); if (skb->ip_summed == CHECKSUM_PARTIAL) flagsize |= BD_FLG_TCP_UDP_SUM; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { flagsize |= BD_FLG_VLAN_TAG; - vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = 
skb_vlan_tag_get(skb); } ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 841e6558db68..4c2ae2221780 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, lp->tx_ring[tx_index].tx_flags = 0; #if AMD8111E_VLAN_TAG_USED - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { lp->tx_ring[tx_index].tag_ctrl_cmd |= cpu_to_le16(TCC_VLAN_INSERT); lp->tx_ring[tx_index].tag_ctrl_info = - cpu_to_le16(vlan_tx_tag_get(skb)); + cpu_to_le16(skb_vlan_tag_get(skb)); } #endif diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7bb5f07dbeef..2ba1dd22ad64 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1165,8 +1165,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) { - if (vlan_tx_tag_present(skb)) - packet->vlan_ctag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) + packet->vlan_ctag = skb_vlan_tag_get(skb); } static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) @@ -1247,9 +1247,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE, 1); - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { /* VLAN requires an extra descriptor if tag is different */ - if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag) + if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) /* We can share with the TSO context descriptor */ if (!context_desc) { context_desc = 1; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index c9946c6c119e..587f63e87588 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } - if (unlikely(vlan_tx_tag_present(skb))) { - u16 vlan = vlan_tx_tag_get(skb); + if (unlikely(skb_vlan_tag_present(skb))) { + u16 vlan = skb_vlan_tag_get(skb); __le16 tag; vlan = cpu_to_le16(vlan); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index c88abf5b6415..59a03a193e83 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, tpd = atl1e_get_tpd(adapter); - if (vlan_tx_tag_present(skb)) { - u16 vlan_tag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag = skb_vlan_tag_get(skb); u16 atl1e_vlan_tag; tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 2c8f398aeda9..eca1d113fee1 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, (u16) atomic_read(&tpd_ring->next_to_use)); memset(ptpd, 0, sizeof(struct tx_packet_desc)); - if (vlan_tx_tag_present(skb)) { - vlan_tag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + vlan_tag = skb_vlan_tag_get(skb); vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 
((vlan_tag >> 9) & 0x8); ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 482a7cabb0a1..46a535318c7a 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, offset = ((u32)(skb->len-copy_len + 3) & ~3); } #ifdef NETIF_F_HW_VLAN_CTAG_TX - if (vlan_tx_tag_present(skb)) { - u16 vlan_tag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag = skb_vlan_tag_get(skb); vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | ((vlan_tag >> 9) & 0x8); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 823d01c5684c..02bf0b86995b 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { vlan_tag_flags |= - (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); + (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16)); } if ((mss = skb_shinfo(skb)->gso_size)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..b51a18a09d4d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) "sending pkt %u @%p next_idx %u bd %u @%p\n", pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { tx_start_bd->vlan_or_ethertype = - cpu_to_le16(vlan_tx_tag_get(skb)); + cpu_to_le16(skb_vlan_tag_get(skb)); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); } else { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 356bd5b022a5..4cf43bfbc955 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8002,9 +8002,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) !mss && skb->len > VLAN_ETH_FRAME_LEN) base_flags |= TXD_FLAG_JMB_PKT; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { base_flags |= TXD_FLAG_VLAN; - vlan = vlan_tx_tag_get(skb); + vlan = skb_vlan_tag_get(skb); } if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 323721838cf9..7714d7790089 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, u32 gso_size; u16 vlan_tag = 0; - if (vlan_tx_tag_present(skb)) { - vlan_tag = (u16)vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + vlan_tag = (u16)skb_vlan_tag_get(skb); flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); } if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index babe2a915b00..526ea74e82d9 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) } cpl->iff 
= dev->if_port; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { cpl->vlan_valid = 1; - cpl->vlan = htons(vlan_tx_tag_get(skb)); + cpl->vlan = htons(skb_vlan_tag_get(skb)); st->vlan_insert++; } else cpl->vlan_valid = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 3dfcf600fcc6..d6aa602f168d 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, cpl->len = htonl(skb->len); cntrl = V_TXPKT_INTF(pi->port_id); - if (vlan_tx_tag_present(skb)) - cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); + if (skb_vlan_tag_present(skb)) + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb)); tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); if (tso_info) { @@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) qs->port_stats[SGE_PSTAT_TX_CSUM]++; if (skb_shinfo(skb)->gso_size) qs->port_stats[SGE_PSTAT_TSO]++; - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) qs->port_stats[SGE_PSTAT_VLANINS]++; /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ca42e2e9dec9..619156112b21 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1154,9 +1154,9 @@ out_free: dev_kfree_skb_any(skb); cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { q->vlan_ins++; - cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); } cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 4424277a7e4d..0545f0de1c52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1326,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * If there's a VLAN tag present, add that to the list of things to * do in this Work Request. 
*/ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { txq->vlan_ins++; - cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); } /* diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9a952df6606e..0535f6fbdc71 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -520,10 +520,10 @@ static inline void enic_queue_wq_skb(struct enic *enic, int loopback = 0; int err; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { /* VLAN tag from trunking driver */ vlan_tag_insert = 1; - vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = skb_vlan_tag_get(skb); } else if (enic->loop_enable) { vlan_tag = enic->loop_tag; loopback = 1; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 37a26b0b7e33..ed46610e5453 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -694,7 +694,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, u8 vlan_prio; u16 vlan_tag; - vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = skb_vlan_tag_get(skb); vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; /* If vlan priority provided by OS is NOT in available bmap */ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) @@ -745,7 +745,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { SET_TX_WRB_HDR_BITS(vlan, hdr, 1); vlan_tag = be_get_tx_vlan_tag(adapter, skb); SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); @@ -864,7 +864,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (unlikely(!skb)) return skb; - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) vlan_tag = be_get_tx_vlan_tag(adapter, skb); if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { @@ -923,7 +923,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb) static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb) { - return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; + return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid; } static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) @@ -946,7 +946,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? VLAN_ETH_HLEN : ETH_HLEN; if (skb->len <= 60 && - (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && + (lancer_chip(adapter) || skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); @@ -964,7 +964,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, * Manually insert VLAN in pkt. 
*/ if (skb->ip_summed != CHECKSUM_PARTIAL && - vlan_tx_tag_present(skb)) { + skb_vlan_tag_present(skb)) { skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto err; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index e54b1e39f9b4..93ff846e96f1 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2170,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) { fcb->flags |= TXFCB_VLN; - fcb->vlctl = vlan_tx_tag_get(skb); + fcb->vlctl = skb_vlan_tag_get(skb); } static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, @@ -2230,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) regs = tx_queue->grp->regs; do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); - do_vlan = vlan_tx_tag_present(skb); + do_vlan = skb_vlan_tag_present(skb); do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 566b17db135a..e8a1adb7a962 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) memset(swqe, 0, SWQE_HEADER_SIZE); atomic_dec(&pr->swqe_avail); - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; - swqe->vlan_tag = vlan_tx_tag_get(skb); + swqe->vlan_tag = skb_vlan_tag_get(skb); } pr->tx_packets++; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 83140cbb5f01..9242982db3e0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3226,9 +3226,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { tx_flags |= E1000_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); } first = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 332a298e95b5..38cb586b1bf4 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5463,8 +5463,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct e1000_hw *hw = &adapter->hw; u16 length, offset; - if (vlan_tx_tag_present(skb) && - !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + if (skb_vlan_tag_present(skb) && + !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) return 0; @@ -5603,9 +5603,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (e1000_maybe_stop_tx(tx_ring, count + 2)) return NETDEV_TX_BUSY; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { tx_flags |= E1000_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); } first = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index eb088b129bc7..caa43f7c2931 100644 --- 
a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -965,8 +965,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, tx_desc = FM10K_TX_DESC(tx_ring, i); /* add HW VLAN tag */ - if (vlan_tx_tag_present(skb)) - tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + if (skb_vlan_tag_present(skb)) + tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); else tx_desc->vlan = 0; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 8811364b91cb..945b35d31c71 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) int err; if ((skb->protocol == htons(ETH_P_8021Q)) && - !vlan_tx_tag_present(skb)) { + !skb_vlan_tag_present(skb)) { /* FM10K only supports hardware tagging, any tags in frame * are considered 2nd level or "outer" tags */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 04b441460bbd..9f536dd8e1ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1772,8 +1772,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, u32 tx_flags = 0; /* if we have a HW VLAN tag being added, default to the HW one */ - if (vlan_tx_tag_present(skb)) { - tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= I40E_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 04c7c1557a0c..82c3798fdd36 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1122,8 +1122,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, u32 tx_flags = 0; /* if we have a HW VLAN tag being added, default to the HW one */ - if (vlan_tx_tag_present(skb)) { - tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= I40E_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ff59897a9463..6c25ec314183 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); } /* record initial flags and protocol */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 63c807c9b21c..ad2b4897b392 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2234,9 +2234,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (vlan_tx_tag_present(skb)) { + if 
(skb_vlan_tag_present(skb)) { tx_flags |= IGBVF_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); + tx_flags |= (skb_vlan_tag_get(skb) << + IGBVF_TX_FLAGS_VLAN_SHIFT); } if (skb->protocol == htons(ETH_P_IP)) diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index aa87605b144a..11a1bdbe3fd9 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) DESC_NEEDED))) return NETDEV_TX_BUSY; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { tx_flags |= IXGB_TX_FLAGS_VLAN; - vlan_id = vlan_tx_tag_get(skb); + vlan_id = skb_vlan_tag_get(skb); } first = adapter->tx_ring.next_to_use; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2ed2c7de2304..7bb421bfd84e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7217,8 +7217,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, first->gso_segs = 1; /* if we have a HW VLAN tag being added default to the HW one */ - if (vlan_tx_tag_present(skb)) { - tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 62a0d8e0f17d..c9b49bfb51bb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3452,8 +3452,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) first->bytecount = skb->len; first->gso_segs = 1; - if (vlan_tx_tag_present(skb)) { - tx_flags |= vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb); tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 44ce7d88f554..6e9a792097d3 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) static inline void jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) { - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { *flags |= TXFLAG_TAGON; - *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + *vlan = cpu_to_le16(skb_vlan_tag_get(skb)); } } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 867a6a3ef81f..d9f4498832a1 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, ctrl = 0; /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { if (!le) { le = get_tx_le(sky2, &slot); le->addr = 0; le->opcode = OP_VLAN|HW_OWNER; } else le->opcode |= OP_VLAN; - le->length = cpu_to_be16(vlan_tx_tag_get(skb)); + le->length = cpu_to_be16(skb_vlan_tag_get(skb)); ctrl |= INS_VLAN; } @@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev, sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 
prefetch(sky2->rx_ring + sky2->rx_next); - if (vlan_tx_tag_present(re->skb)) + if (skb_vlan_tag_present(re->skb)) count -= VLAN_HLEN; /* Account for vlan tag */ /* This chip has hardware problems that generates bogus status. diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index e3357bf523df..359bb1286eb5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, if (dev->num_tc) return skb_tx_hash(dev, skb); - if (vlan_tx_tag_present(skb)) - up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; + if (skb_vlan_tag_present(skb)) + up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; return fallback(dev, skb) % rings_p_up + up * rings_p_up; } @@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; } - if (vlan_tx_tag_present(skb)) - vlan_tag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) + vlan_tag = skb_vlan_tag_get(skb); netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); @@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) real_size = (real_size / 16) & 0x3f; if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && - !vlan_tx_tag_present(skb) && send_doorbell) { + !skb_vlan_tag_present(skb) && send_doorbell) { tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size); @@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) } else { tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - !!vlan_tx_tag_present(skb); + !!skb_vlan_tag_present(skb); tx_desc->ctrl.fence_size = real_size; /* Ensure new descriptor hits memory diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 2552e550a78c..eb807b0dc72a 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1122,12 +1122,12 @@ again: } #ifdef NS83820_VLAN_ACCEL_SUPPORT - if(vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { /* fetch the vlan tag info out of the * ancillary data if the vlan code * is using hw vlan acceleration */ - short tag = vlan_tx_tag_get(skb); + short tag = skb_vlan_tag_get(skb); extsts |= (EXTSTS_VPKT | htons(tag)); } #endif diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index f5e4b820128b..0529cad75b10 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) } queue = 0; - if (vlan_tx_tag_present(skb)) - vlan_tag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) + vlan_tag = skb_vlan_tag_get(skb); if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *ip; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index cc0485e3c621..50d5604833ed 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) dev->name, __func__, __LINE__, fifo_hw, dtr, dtr_priv); - if (vlan_tx_tag_present(skb)) { - u16 vlan_tag = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag = skb_vlan_tag_get(skb); vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); } diff --git 
a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index f39cae620f61..a41bb5e6b954 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; /* vlan tag */ - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | - vlan_tx_tag_get(skb)); + skb_vlan_tag_get(skb)); else start_tx->txvlan = 0; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..a47fe67fdf58 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1893,9 +1893,9 @@ netxen_tso_check(struct net_device *netdev, protocol = vh->h_vlan_encapsulated_proto; flags = FLAGS_VLAN_TAGGED; - } else if (vlan_tx_tag_present(skb)) { + } else if (skb_vlan_tag_present(skb)) { flags = FLAGS_VLAN_OOB; - vid = vlan_tx_tag_get(skb); + vid = skb_vlan_tag_get(skb); netxen_set_tx_vlan_tci(first_desc, vid); vlan_oob = 1; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index d166e534925d..4d2496f28b85 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -321,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, if (protocol == ETH_P_8021Q) { vh = (struct vlan_ethhdr *)skb->data; vlan_id = ntohs(vh->h_vlan_TCI); - } else if (vlan_tx_tag_present(skb)) { - vlan_id = vlan_tx_tag_get(skb); + } else if (skb_vlan_tag_present(skb)) { + vlan_id = skb_vlan_tag_get(skb); } } @@ -473,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, flags = QLCNIC_FLAGS_VLAN_TAGGED; vlan_tci = ntohs(vh->h_vlan_TCI); protocol = ntohs(vh->h_vlan_encapsulated_proto); - } else if (vlan_tx_tag_present(skb)) { + } else if (skb_vlan_tag_present(skb)) { flags = QLCNIC_FLAGS_VLAN_OOB; - vlan_tci = vlan_tx_tag_get(skb); + vlan_tci = skb_vlan_tag_get(skb); } if (unlikely(adapter->tx_pvid)) { if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 6c904a6cad2a..dc0058f90370 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2660,11 +2660,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); + "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb)); mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; - mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); + mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); } tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); if (tso < 0) { diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 9c31e46d1eee..d79e33b3c191 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp) static inline u32 cp_tx_vlan_tag(struct sk_buff *skb) { - return vlan_tx_tag_present(skb) ? 
- TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; + return skb_vlan_tag_present(skb) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; } static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3a280598a15a..cd286b0356ab 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev, static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) { - return (vlan_tx_tag_present(skb)) ? - TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; } static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index b6612d6090ac..23545e1e605a 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1272,7 +1272,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) ctxt_desc_req = 1; - if (unlikely(vlan_tx_tag_present(skb) || + if (unlikely(skb_vlan_tag_present(skb) || ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && tqueue->hwts_tx_en))) ctxt_desc_req = 1; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 6ab36d9ff2ab..a9cac8413e49 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, txd_mss); } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { /*Cut VLAN ID to 12 bits */ - txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12); + txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12); txd_vtag = 1; } diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index a191afc23b56..0ac76102b33d 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1781,8 +1781,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, rp->tx_ring[entry].desc_length = cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); - if (unlikely(vlan_tx_tag_present(skb))) { - u16 vid_pcp = vlan_tx_tag_get(skb); + if (unlikely(skb_vlan_tag_present(skb))) { + u16 vid_pcp = skb_vlan_tag_get(skb); /* drop CFI/DEI bit, register needs VID and PCP */ vid_pcp = (vid_pcp & VLAN_VID_MASK) | @@ -1803,7 +1803,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, /* Non-x86 Todo: explicitly flush cache lines here. 
*/ - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 282f83a63b67..c20206f83cc1 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb, td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; - if (vlan_tx_tag_present(skb)) { - td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + if (skb_vlan_tag_present(skb)) { + td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); td_ptr->tdesc1.TCR |= TCR0_VETAG; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 7df221788cd4..d0ed5694dd7d 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -645,7 +645,7 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) vnet_hdr->csum_start = cpu_to_macvtap16(q, skb_checksum_start_offset(skb) + VLAN_HLEN); else @@ -821,13 +821,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, total = vnet_hdr_len; total += skb->len; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; } veth; veth.h_vlan_proto = skb->vlan_proto; - veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); + veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); total += VLAN_HLEN; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 74fdf1158448..be196e89ab6c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1260,7 +1260,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, int vlan_hlen = 0; int vnet_hdr_sz = 0; - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) @@ -1337,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, } veth; veth.h_vlan_proto = skb->vlan_proto; - veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); + veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index b23426e4952c..e519e6a269b9 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1421,10 +1421,10 @@ static int msdn_giant_send_check(struct sk_buff *skb) static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) { - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { u32 opts2; - opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb)); + opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb)); desc->opts2 |= cpu_to_le32(opts2); } } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 31439818c27e..294214c15292 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1038,9 +1038,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, le32_add_cpu(&tq->shared->txNumDeferred, 1); } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { gdesc->txd.ti = 1; - gdesc->txd.tci = vlan_tx_tag_get(skb); + gdesc->txd.tci = skb_vlan_tag_get(skb); } /* finally flips the GEN bit of the SOP desc. 
*/ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3a18d8ed89ca..985359dd6033 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1561,7 +1561,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + VXLAN_HLEN + sizeof(struct ipv6hdr) - + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); @@ -1607,7 +1607,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + VXLAN_HLEN + sizeof(struct iphdr) - + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..dd4ab8d73d34 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2800,12 +2800,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, * before we're going to overwrite this location with next hop ip. * v6 uses passthrough, v4 sets the tag in the QDIO header. */ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; else hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG; - hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); + hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); } hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); @@ -2986,7 +2986,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_pull(new_skb, ETH_HLEN); } - if (ipv != 4 && vlan_tx_tag_present(new_skb)) { + if (ipv != 4 && skb_vlan_tag_present(new_skb)) { skb_push(new_skb, VLAN_HLEN); skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); skb_copy_to_linear_data_offset(new_skb, 4, @@ -2995,7 +2995,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) new_skb->data + 12, 4); tag = (u16 *)(new_skb->data + 12); *tag = __constant_htons(ETH_P_8021Q); - *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); + *(tag + 1) = htons(skb_vlan_tag_get(new_skb)); } } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 14419a8ccbb6..bcaf4cabb858 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -469,7 +469,7 @@ static int peek_head_len(struct sock *sk) head = skb_peek(&sk->sk_receive_queue); if (likely(head)) { len = head->len; - if (vlan_tx_tag_present(head)) + if (skb_vlan_tag_present(head)) len += VLAN_HLEN; } diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 515a35e2a48a..bea465f24ebb 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev) return dev->priv_flags & IFF_802_1Q_VLAN; } -#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) -#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) -#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) +#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats @@ -376,7 +376,7 @@ static inline struct sk_buff 
*vlan_insert_tag_set_proto(struct sk_buff *skb, static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb_vlan_tag_get(skb)); if (likely(skb)) skb->vlan_tci = 0; return skb; @@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) */ static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) { - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) skb = __vlan_hwaccel_push_inside(skb); return skb; } @@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { - if (vlan_tx_tag_present(skb)) { - *vlan_tci = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + *vlan_tci = skb_vlan_tag_get(skb); return 0; } else { *vlan_tci = 0; @@ -480,7 +480,7 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) { __be16 protocol = 0; - if (vlan_tx_tag_present(skb) || + if (skb_vlan_tag_present(skb) || skb->protocol != cpu_to_be16(ETH_P_8021Q)) protocol = skb->protocol; else { diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index fe6e7aac3c56..2342bf12cb78 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -121,7 +121,7 @@ static inline __be16 tc_skb_protocol(const struct sk_buff *skb) * vlan accelerated path. In that case, use skb->vlan_proto * as the original vlan header was already stripped. */ - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) return skb->vlan_proto; return skb->protocol; } diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 1de256b35807..49cc7c3de252 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -40,9 +40,9 @@ TRACE_EVENT(net_dev_start_xmit, __assign_str(name, dev->name); __entry->queue_mapping = skb->queue_mapping; __entry->skbaddr = skb; - __entry->vlan_tagged = vlan_tx_tag_present(skb); + __entry->vlan_tagged = skb_vlan_tag_present(skb); __entry->vlan_proto = ntohs(skb->vlan_proto); - __entry->vlan_tci = vlan_tx_tag_get(skb); + __entry->vlan_tci = skb_vlan_tag_get(skb); __entry->protocol = ntohs(skb->protocol); __entry->ip_summed = skb->ip_summed; __entry->len = skb->len; @@ -174,9 +174,9 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template, #endif __entry->queue_mapping = skb->queue_mapping; __entry->skbaddr = skb; - __entry->vlan_tagged = vlan_tx_tag_present(skb); + __entry->vlan_tagged = skb_vlan_tag_present(skb); __entry->vlan_proto = ntohs(skb->vlan_proto); - __entry->vlan_tci = vlan_tx_tag_get(skb); + __entry->vlan_tci = skb_vlan_tag_get(skb); __entry->protocol = ntohs(skb->protocol); __entry->ip_summed = skb->ip_summed; __entry->hash = skb->hash; diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 90cc2bdd4064..61bf2a06e85d 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp) { struct sk_buff *skb = *skbp; __be16 vlan_proto = skb->vlan_proto; - u16 vlan_id = vlan_tx_tag_get_id(skb); + u16 vlan_id = skb_vlan_tag_get_id(skb); struct net_device *vlan_dev; struct vlan_pcpu_stats *rx_stats; diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index c190d22b6b3d..65728e0dc4ff 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -66,17 +66,17 @@ static int brnf_pass_vlan_indev __read_mostly = 0; #endif #define IS_IP(skb) \ - (!vlan_tx_tag_present(skb) && 
skb->protocol == htons(ETH_P_IP)) + (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP)) #define IS_IPV6(skb) \ - (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6)) + (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6)) #define IS_ARP(skb) \ - (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP)) + (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP)) static inline __be16 vlan_proto(const struct sk_buff *skb) { - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) return skb->protocol; else if (skb->protocol == htons(ETH_P_8021Q)) return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; @@ -436,11 +436,11 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct struct net_device *vlan, *br; br = bridge_parent(dev); - if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) + if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb)) return br; vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto, - vlan_tx_tag_get(skb) & VLAN_VID_MASK); + skb_vlan_tag_get(skb) & VLAN_VID_MASK); return vlan ? vlan : br; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index aea3d1339b3f..d808d766334d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -628,8 +628,8 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid) { int err = 0; - if (vlan_tx_tag_present(skb)) - *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK; + if (skb_vlan_tag_present(skb)) + *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK; else { *vid = 0; err = -EINVAL; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 97b8ddf57363..13013fe8db24 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -187,7 +187,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, * sent from vlan device on the bridge device, it does not have * HW accelerated vlan tag. 
*/ - if (unlikely(!vlan_tx_tag_present(skb) && + if (unlikely(!skb_vlan_tag_present(skb) && skb->protocol == proto)) { skb = skb_vlan_untag(skb); if (unlikely(!skb)) @@ -200,7 +200,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, /* Protocol-mismatch, empty out vlan_tci for new tag */ skb_push(skb, ETH_HLEN); skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb_vlan_tag_get(skb)); if (unlikely(!skb)) return false; diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c index 8d3f8c7651f0..618568888128 100644 --- a/net/bridge/netfilter/ebt_vlan.c +++ b/net/bridge/netfilter/ebt_vlan.c @@ -45,8 +45,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par) /* VLAN encapsulated Type/Length field, given from orig frame */ __be16 encap; - if (vlan_tx_tag_present(skb)) { - TCI = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + TCI = skb_vlan_tag_get(skb); encap = skb->protocol; } else { const struct vlan_hdr *fp; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index d9a8c05d995d..91180a7fc943 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -133,7 +133,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, __be16 ethproto; int verdict, i; - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) ethproto = htons(ETH_P_8021Q); else ethproto = h->h_proto; diff --git a/net/core/dev.c b/net/core/dev.c index 805456147c30..1e325adc4367 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2578,7 +2578,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) if (skb->encapsulation) features &= dev->hw_enc_features; - if (!vlan_tx_tag_present(skb)) { + if (!skb_vlan_tag_present(skb)) { if (unlikely(protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; @@ -2659,7 +2659,7 @@ out: static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features) { - if (vlan_tx_tag_present(skb) && + if (skb_vlan_tag_present(skb) && !vlan_hw_offload_capable(features, skb->vlan_proto)) skb = __vlan_hwaccel_push_inside(skb); return skb; @@ -3676,7 +3676,7 @@ ncls: if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { if (pt_prev) { ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = NULL; @@ -3708,8 +3708,8 @@ ncls: } } - if (unlikely(vlan_tx_tag_present(skb))) { - if (vlan_tx_tag_get_id(skb)) + if (unlikely(skb_vlan_tag_present(skb))) { + if (skb_vlan_tag_get_id(skb)) skb->pkt_type = PACKET_OTHERHOST; /* Note: we might in the future use prio bits * and set skb->priority like in vlan_do_receive() diff --git a/net/core/netpoll.c b/net/core/netpoll.c index e0ad5d16c9c5..c126a878c47c 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -77,7 +77,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, features = netif_skb_features(skb); - if (vlan_tx_tag_present(skb) && + if (skb_vlan_tag_present(skb) && !vlan_hw_offload_capable(features, skb->vlan_proto)) { skb = __vlan_hwaccel_push_inside(skb); if (unlikely(!skb)) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5a2a2e887a12..56db472e9b86 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4197,7 +4197,7 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) struct vlan_hdr *vhdr; u16 vlan_tci; - if (unlikely(vlan_tx_tag_present(skb))) { + if 
(unlikely(skb_vlan_tag_present(skb))) { /* vlan_tci is already set-up so leave this for another time */ return skb; } @@ -4283,7 +4283,7 @@ int skb_vlan_pop(struct sk_buff *skb) __be16 vlan_proto; int err; - if (likely(vlan_tx_tag_present(skb))) { + if (likely(skb_vlan_tag_present(skb))) { skb->vlan_tci = 0; } else { if (unlikely((skb->protocol != htons(ETH_P_8021Q) && @@ -4313,7 +4313,7 @@ EXPORT_SYMBOL(skb_vlan_pop); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { unsigned int offset = skb->data - skb_mac_header(skb); int err; @@ -4323,7 +4323,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) */ __skb_push(skb, offset); err = __vlan_insert_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb_vlan_tag_get(skb)); if (err) return err; skb->protocol = skb->vlan_proto; diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index 5b52046ec7a2..23744c7a9718 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c @@ -119,7 +119,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr) - + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); err = skb_cow_head(skb, min_headroom); if (unlikely(err)) { diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 770064c83711..b4cffe686126 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -212,7 +212,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) int err; err = skb_vlan_pop(skb); - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) invalidate_flow_key(key); else key->eth.tci = 0; @@ -222,7 +222,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, const struct ovs_action_push_vlan *vlan) { - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) invalidate_flow_key(key); else key->eth.tci = vlan->vlan_tci; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 4e9a5f035cbc..54854e3ecd83 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -419,7 +419,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, if (!dp_ifindex) return -ENODEV; - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index da2fae0873a5..df334fe43d7f 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -70,7 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, { struct flow_stats *stats; int node = numa_node_id(); - int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + int len = skb->len + (skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0); stats = rcu_dereference(flow->stats[node]); @@ -472,7 +472,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) */ key->eth.tci = 0; - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) key->eth.tci = htons(skb->vlan_tci); else if (eth->h_proto == htons(ETH_P_8021Q)) if (unlikely(parse_vlan(skb, key))) diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index d4168c442db5..e9aedb7c7106 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -166,7 +166,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + tunnel_hlen + sizeof(struct iphdr) - + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { int head_delta = SKB_DATA_ALIGN(min_headroom - skb_headroom(skb) + diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 2034c6d9cb5a..464739aac0f3 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -480,7 +480,8 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, stats = this_cpu_ptr(vport->percpu_stats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; - stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + stats->rx_bytes += skb->len + + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); u64_stats_update_end(&stats->syncp); OVS_CB(skb)->input_vport = vport; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 0f02668dc219..d37075b0d6d5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -986,8 +986,8 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { - if (vlan_tx_tag_present(pkc->skb)) { - ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb); + if (skb_vlan_tag_present(pkc->skb)) { + ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { @@ -2000,8 +2000,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, h.h2->tp_net = netoff; h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; - if (vlan_tx_tag_present(skb)) { - h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { @@ -3010,8 +3010,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); - if (vlan_tx_tag_present(skb)) { - aux.tp_vlan_tci = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 2159981b604e..b5294ce20cd4 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -176,7 +176,7 @@ META_COLLECTOR(int_vlan_tag) { unsigned short tag; - tag = vlan_tx_tag_get(skb); + tag = skb_vlan_tag_get(skb); if (!tag && __vlan_get_tag(skb, &tag)) *err = -1; else diff --git a/net/wireless/util.c b/net/wireless/util.c index d0ac795445b7..1d2fcfad06cc 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -708,8 +708,8 @@ 
unsigned int cfg80211_classify8021d(struct sk_buff *skb, if (skb->priority >= 256 && skb->priority <= 263) return skb->priority - 256; - if (vlan_tx_tag_present(skb)) { - vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) + if (skb_vlan_tag_present(skb)) { + vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; if (vlan_priority > 0) return vlan_priority; -- cgit v1.2.3 From a2b12f3c7ac1ea43ae646db74faf0b56c2bba563 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 12 Jan 2015 17:00:37 -0800 Subject: udp: pass udp_offload struct to UDP gro callbacks This patch introduces udp_offload_callbacks, which has the same GRO functions (but no GSO function) as offload_callbacks, except that a pointer to a udp_offload struct is passed to the gro_receive and gro_complete functions. This additional argument can be used to retrieve the encapsulation's per-port structure for use in GRO processing (typically by calling container_of on it). Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 7 +++++-- include/linux/netdevice.h | 15 +++++++++++++-- net/ipv4/fou.c | 12 ++++++++---- net/ipv4/geneve.c | 6 ++++-- net/ipv4/udp_offload.c | 7 +++++-- 5 files changed, 35 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 985359dd6033..5c56a3ff25aa 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -539,7 +539,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, return 1; } -static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb) +static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, + struct sk_buff *skb, + struct udp_offload *uoff) { struct sk_buff *p, **pp = NULL; struct vxlanhdr *vh, *vh2; @@ -578,7 +580,8 @@ out: return pp; } -static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) +static int vxlan_gro_complete(struct sk_buff *skb, int nhoff, + struct udp_offload *uoff) { udp_tunnel_gro_complete(skb, nhoff); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 679e6e90aa4c..47921c291dd6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1969,7 +1969,7 @@ struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb); + struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); }; @@ -1979,10 +1979,21 @@ struct packet_offload { struct list_head list; }; +struct udp_offload; + +struct udp_offload_callbacks { + struct sk_buff **(*gro_receive)(struct sk_buff **head, + struct sk_buff *skb, + struct udp_offload *uoff); + int (*gro_complete)(struct sk_buff *skb, + int nhoff, + struct udp_offload *uoff); +}; + struct udp_offload { __be16 port; u8 ipproto; - struct offload_callbacks callbacks; + struct udp_offload_callbacks callbacks; }; /* often modified stats are per cpu, other are shared (netdev->stats) */ diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 2197c36f722f..3bc0cf07661c 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -174,7 +174,8 @@ drop: } static struct sk_buff **fou_gro_receive(struct sk_buff **head, - struct sk_buff *skb) + struct sk_buff *skb, + struct udp_offload *uoff) { const struct net_offload *ops; struct sk_buff **pp = NULL; @@ -195,7 +196,8 @@ out_unlock: return pp; } -static int fou_gro_complete(struct sk_buff *skb, int nhoff) +static int fou_gro_complete(struct sk_buff *skb, int nhoff, + struct udp_offload
*uoff) { const struct net_offload *ops; u8 proto = NAPI_GRO_CB(skb)->proto; @@ -254,7 +256,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, } static struct sk_buff **gue_gro_receive(struct sk_buff **head, - struct sk_buff *skb) + struct sk_buff *skb, + struct udp_offload *uoff) { const struct net_offload **offloads; const struct net_offload *ops; @@ -360,7 +363,8 @@ out: return pp; } -static int gue_gro_complete(struct sk_buff *skb, int nhoff) +static int gue_gro_complete(struct sk_buff *skb, int nhoff, + struct udp_offload *uoff) { const struct net_offload **offloads; struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff); diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index 23744c7a9718..9568594ca2f1 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c @@ -147,7 +147,8 @@ static int geneve_hlen(struct genevehdr *gh) } static struct sk_buff **geneve_gro_receive(struct sk_buff **head, - struct sk_buff *skb) + struct sk_buff *skb, + struct udp_offload *uoff) { struct sk_buff *p, **pp = NULL; struct genevehdr *gh, *gh2; @@ -211,7 +212,8 @@ out: return pp; } -static int geneve_gro_complete(struct sk_buff *skb, int nhoff) +static int geneve_gro_complete(struct sk_buff *skb, int nhoff, + struct udp_offload *uoff) { struct genevehdr *gh; struct packet_offload *ptype; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index d3e537ef6b7f..d10f6f4ead27 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -339,7 +339,8 @@ unflush: skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; - pp = uo_priv->offload->callbacks.gro_receive(head, skb); + pp = uo_priv->offload->callbacks.gro_receive(head, skb, + uo_priv->offload); out_unlock: rcu_read_unlock(); @@ -395,7 +396,9 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff) if (uo_priv != NULL) { NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; - err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr)); + err = uo_priv->offload->callbacks.gro_complete(skb, + nhoff + sizeof(struct udphdr), + uo_priv->offload); } rcu_read_unlock(); -- cgit v1.2.3 From 57699a40b4f2694d3ee63fd5e6465ec8f600b620 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Fri, 16 Jan 2015 11:13:09 +0800 Subject: rhashtable: Fix race in rhashtable_destroy() and use regular work_struct When we put our work task on the global workqueue with schedule_delayed_work(), its delay parameter is always zero. Therefore, we should embed a regular work item in the rhashtable structure instead of a delayed one. In addition, we add a check that the resizing functions are non-NULL before cancelling the work, so that an uninitialized work item is never cancelled. Lastly, rhashtable_destroy() used to take ht->mutex before cancelling the pending work. The synchronous cancel does not return until any pending or running work item has completed, and the work function, rht_deferred_worker(), itself needs to acquire ht->mutex. Cancelling while holding the mutex could therefore deadlock. Moving the work cancellation out of the locked scope avoids the deadlock.
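As a minimal sketch of the resulting pattern (condensed from the diff below, with the resize logic elided), the worker itself takes ht->mutex, which is why the destroy path must finish the synchronous cancel before taking the lock:

static void rht_deferred_worker(struct work_struct *work)
{
	/* run_work is now a plain work_struct embedded in struct rhashtable */
	struct rhashtable *ht = container_of(work, struct rhashtable, run_work);

	mutex_lock(&ht->mutex);	/* the same lock rhashtable_destroy() takes */
	/* ... expand or shrink the bucket table ... */
	mutex_unlock(&ht->mutex);
}

void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	/* The work is only initialized when resizing is enabled, so only
	 * cancel it in that case; cancel_work_sync() waits for a running
	 * worker and therefore must be called without ht->mutex held.
	 */
	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}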
Fixes: 97defe1 ("rhashtable: Per bucket locks & deferred expansion/shrinking") Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 2 +- lib/rhashtable.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 9570832ab07c..a2562ed53ea3 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -119,7 +119,7 @@ struct rhashtable { atomic_t nelems; atomic_t shift; struct rhashtable_params p; - struct delayed_work run_work; + struct work_struct run_work; struct mutex mutex; bool being_destroyed; }; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index aca699813ba9..84a78e396a56 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -485,7 +485,7 @@ static void rht_deferred_worker(struct work_struct *work) struct rhashtable *ht; struct bucket_table *tbl; - ht = container_of(work, struct rhashtable, run_work.work); + ht = container_of(work, struct rhashtable, run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); @@ -507,7 +507,7 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht) if (tbl == new_tbl && ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) || (ht->p.shrink_decision && ht->p.shrink_decision(ht, size)))) - schedule_delayed_work(&ht->run_work, 0); + schedule_work(&ht->run_work); } static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, @@ -903,7 +903,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); if (ht->p.grow_decision || ht->p.shrink_decision) - INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker); + INIT_WORK(&ht->run_work, rht_deferred_worker); return 0; } @@ -921,11 +921,11 @@ void rhashtable_destroy(struct rhashtable *ht) { ht->being_destroyed = true; - mutex_lock(&ht->mutex); + if (ht->p.grow_decision || ht->p.shrink_decision) + cancel_work_sync(&ht->run_work); - cancel_delayed_work(&ht->run_work); + mutex_lock(&ht->mutex); bucket_table_free(rht_dereference(ht->tbl, ht)); - mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_destroy); -- cgit v1.2.3 From 3aeb66176ffa8fefd7a9f7d37bda1d8adcf469a1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 15 Jan 2015 23:49:37 +0100 Subject: net: replace br_fdb_external_learn_* calls with switchdev notifier events This patch benefits from the newly introduced switchdev notifier and uses it to propagate FDB learn events from the rocker driver to the bridge. That avoids direct function calls and allows other listeners (e.g. OVS) to consume the events as well. Suggested-by: Thomas Graf Signed-off-by: Jiri Pirko Signed-off-by: Scott Feldman Signed-off-by: David S.
Miller --- drivers/net/ethernet/rocker/rocker.c | 10 +++++-- include/linux/if_bridge.h | 18 ------------- include/net/switchdev.h | 11 ++++++++ net/bridge/br.c | 52 +++++++++++++++++++++++++++++++++++- net/bridge/br_fdb.c | 38 +++----------------------- net/bridge/br_private.h | 4 +++ 6 files changed, 78 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index cad8cf962cdf..964d719b150f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3026,11 +3026,17 @@ static void rocker_port_fdb_learn_work(struct work_struct *work) container_of(work, struct rocker_fdb_learn_work, work); bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); + struct netdev_switch_notifier_fdb_info info; + + info.addr = lw->addr; + info.vid = lw->vid; if (learned && removing) - br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid); + call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL, + lw->dev, &info.info); else if (learned && !removing) - br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid); + call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD, + lw->dev, &info.info); kfree(work); } diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 0a8ce762a47f..a57bca2ea97e 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -50,24 +50,6 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; -#if IS_ENABLED(CONFIG_BRIDGE) -int br_fdb_external_learn_add(struct net_device *dev, - const unsigned char *addr, u16 vid); -int br_fdb_external_learn_del(struct net_device *dev, - const unsigned char *addr, u16 vid); -#else -static inline int br_fdb_external_learn_add(struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - return 0; -} -static inline int br_fdb_external_learn_del(struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - return 0; -} -#endif - #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 7f8d74372d87..201120e18e4d 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -13,10 +13,21 @@ #include #include +enum netdev_switch_notifier_type { + NETDEV_SWITCH_FDB_ADD = 1, + NETDEV_SWITCH_FDB_DEL, +}; + struct netdev_switch_notifier_info { struct net_device *dev; }; +struct netdev_switch_notifier_fdb_info { + struct netdev_switch_notifier_info info; /* must be first */ + const unsigned char *addr; + u16 vid; +}; + static inline struct net_device * netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info) { diff --git a/net/bridge/br.c b/net/bridge/br.c index 44425aff7cba..fb57ab6b24f9 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "br_private.h" @@ -120,6 +121,48 @@ static struct notifier_block br_device_notifier = { .notifier_call = br_device_event }; +static int br_netdev_switch_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_switch_notifier_info_to_dev(ptr); + struct net_bridge_port *p; + struct net_bridge *br; + struct netdev_switch_notifier_fdb_info *fdb_info; + int err = NOTIFY_DONE; + 
+ rtnl_lock(); + p = br_port_get_rtnl(dev); + if (!p) + goto out; + + br = p->br; + + switch (event) { + case NETDEV_SWITCH_FDB_ADD: + fdb_info = ptr; + err = br_fdb_external_learn_add(br, p, fdb_info->addr, + fdb_info->vid); + if (err) + err = notifier_from_errno(err); + break; + case NETDEV_SWITCH_FDB_DEL: + fdb_info = ptr; + err = br_fdb_external_learn_del(br, p, fdb_info->addr, + fdb_info->vid); + if (err) + err = notifier_from_errno(err); + break; + } + +out: + rtnl_unlock(); + return err; +} + +static struct notifier_block br_netdev_switch_notifier = { + .notifier_call = br_netdev_switch_event, +}; + static void __net_exit br_net_exit(struct net *net) { struct net_device *dev; @@ -169,10 +212,14 @@ static int __init br_init(void) if (err) goto err_out3; - err = br_netlink_init(); + err = register_netdev_switch_notifier(&br_netdev_switch_notifier); if (err) goto err_out4; + err = br_netlink_init(); + if (err) + goto err_out5; + brioctl_set(br_ioctl_deviceless_stub); #if IS_ENABLED(CONFIG_ATM_LANE) @@ -185,6 +232,8 @@ static int __init br_init(void) return 0; +err_out5: + unregister_netdev_switch_notifier(&br_netdev_switch_notifier); err_out4: unregister_netdevice_notifier(&br_device_notifier); err_out3: @@ -202,6 +251,7 @@ static void __exit br_deinit(void) { stp_proto_unregister(&br_stp_proto); br_netlink_fini(); + unregister_netdev_switch_notifier(&br_netdev_switch_notifier); unregister_netdevice_notifier(&br_device_notifier); brioctl_set(NULL); unregister_pernet_subsys(&br_net_ops); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index e6e0372bc3cd..03667e65cc29 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -990,26 +990,14 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p) } } -int br_fdb_external_learn_add(struct net_device *dev, +int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid) { - struct net_bridge_port *p; - struct net_bridge *br; struct hlist_head *head; struct net_bridge_fdb_entry *fdb; int err = 0; - rtnl_lock(); - - p = br_port_get_rtnl(dev); - if (!p) { - pr_info("bridge: %s not a bridge port\n", dev->name); - err = -EINVAL; - goto err_rtnl_unlock; - } - - br = p->br; - + ASSERT_RTNL(); spin_lock_bh(&br->hash_lock); head = &br->hash[br_mac_hash(addr, vid)]; @@ -1034,33 +1022,18 @@ int br_fdb_external_learn_add(struct net_device *dev, err_unlock: spin_unlock_bh(&br->hash_lock); -err_rtnl_unlock: - rtnl_unlock(); return err; } -EXPORT_SYMBOL(br_fdb_external_learn_add); -int br_fdb_external_learn_del(struct net_device *dev, +int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid) { - struct net_bridge_port *p; - struct net_bridge *br; struct hlist_head *head; struct net_bridge_fdb_entry *fdb; int err = 0; - rtnl_lock(); - - p = br_port_get_rtnl(dev); - if (!p) { - pr_info("bridge: %s not a bridge port\n", dev->name); - err = -EINVAL; - goto err_rtnl_unlock; - } - - br = p->br; - + ASSERT_RTNL(); spin_lock_bh(&br->hash_lock); head = &br->hash[br_mac_hash(addr, vid)]; @@ -1071,9 +1044,6 @@ int br_fdb_external_learn_del(struct net_device *dev, err = -ENOENT; spin_unlock_bh(&br->hash_lock); -err_rtnl_unlock: - rtnl_unlock(); return err; } -EXPORT_SYMBOL(br_fdb_external_learn_del); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d808d766334d..e8e3f3681680 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -402,6 +402,10 @@ int br_fdb_dump(struct sk_buff *skb, struct 
netlink_callback *cb, struct net_device *dev, struct net_device *fdev, int idx); int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p); void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p); +int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, + const unsigned char *addr, u16 vid); +int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, + const unsigned char *addr, u16 vid); /* br_forward.c */ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); -- cgit v1.2.3 From c5ed1df781cb544d4e4d189bb5b6ec7336d8888c Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 19 Jan 2015 08:30:30 +0100 Subject: bcma: use standard bus scanning during early register Starting with kernel 3.19-rc1, early registration of bcma on MIPS is done a bit later, once the memory allocator is available. This allows us to simplify the code by using the standard bus scanning method. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/bcma_private.h | 7 +---- drivers/bcma/host_soc.c | 2 +- drivers/bcma/main.c | 33 +++++++---------------- drivers/bcma/scan.c | 62 +++---------------------------------------- include/linux/bcma/bcma_soc.h | 2 -- 5 files changed, 15 insertions(+), 91 deletions(-) (limited to 'include/linux') diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 59422b5fa46c..3f314c98d089 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -28,9 +28,7 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core); void bcma_init_bus(struct bcma_bus *bus); int bcma_bus_register(struct bcma_bus *bus); void bcma_bus_unregister(struct bcma_bus *bus); -int __init bcma_bus_early_register(struct bcma_bus *bus, - struct bcma_device *core_cc, - struct bcma_device *core_mips); +int __init bcma_bus_early_register(struct bcma_bus *bus); #ifdef CONFIG_PM int bcma_bus_suspend(struct bcma_bus *bus); int bcma_bus_resume(struct bcma_bus *bus); @@ -39,9 +37,6 @@ int bcma_bus_resume(struct bcma_bus *bus); /* scan.c */ void bcma_detect_chip(struct bcma_bus *bus); int bcma_bus_scan(struct bcma_bus *bus); -int __init bcma_bus_scan_early(struct bcma_bus *bus, - struct bcma_device_id *match, - struct bcma_device *core); /* sprom.c */ int bcma_sprom_get(struct bcma_bus *bus); diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index 335cbcfd945b..2dce34789329 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -193,7 +193,7 @@ int __init bcma_host_soc_init(struct bcma_soc *soc) int err; /* Scan bus and initialize it */ - err = bcma_bus_early_register(bus, &soc->core_cc, &soc->core_mips); + err = bcma_bus_early_register(bus); if (err) iounmap(bus->mmio); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index c166d444fef6..c3c5e0a2d5be 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -489,35 +489,20 @@ void bcma_bus_unregister(struct bcma_bus *bus) kfree(cores[0]); } -int __init bcma_bus_early_register(struct bcma_bus *bus, - struct bcma_device *core_cc, - struct bcma_device *core_mips) +/* + * This is a special version of bus registration function designed for SoCs. + * It scans bus and performs basic initialization of main cores only. + * Please note it requires memory allocation, however it won't try to sleep.
+ */ +int __init bcma_bus_early_register(struct bcma_bus *bus) { int err; struct bcma_device *core; - struct bcma_device_id match; - - match.manuf = BCMA_MANUF_BCM; - match.id = bcma_cc_core_id(bus); - match.class = BCMA_CL_SIM; - match.rev = BCMA_ANY_REV; - - /* Scan for chip common core */ - err = bcma_bus_scan_early(bus, &match, core_cc); - if (err) { - bcma_err(bus, "Failed to scan for common core: %d\n", err); - return -1; - } - - match.manuf = BCMA_MANUF_MIPS; - match.id = BCMA_CORE_MIPS_74K; - match.class = BCMA_CL_SIM; - match.rev = BCMA_ANY_REV; - /* Scan for mips core */ - err = bcma_bus_scan_early(bus, &match, core_mips); + /* Scan for devices (cores) */ + err = bcma_bus_scan(bus); if (err) { - bcma_err(bus, "Failed to scan for mips core: %d\n", err); + bcma_err(bus, "Failed to scan bus: %d\n", err); return -1; } diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 5328ee5b4df0..df806b9c5490 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -461,6 +461,10 @@ int bcma_bus_scan(struct bcma_bus *bus) int err, core_num = 0; + /* Skip if bus was already scanned (e.g. during early register) */ + if (bus->nr_cores) + return 0; + erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); if (!eromptr) return -ENOMEM; } else { eromptr = bus->mmio; } @@ -519,61 +523,3 @@ out: return err; } - -int __init bcma_bus_scan_early(struct bcma_bus *bus, - struct bcma_device_id *match, - struct bcma_device *core) -{ - u32 erombase; - u32 __iomem *eromptr, *eromend; - - int err = -ENODEV; - int core_num = 0; - - erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); - if (bus->hosttype == BCMA_HOSTTYPE_SOC) { - eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); - if (!eromptr) - return -ENOMEM; - } else { - eromptr = bus->mmio; - } - - eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32); - - bcma_scan_switch_core(bus, erombase); - - while (eromptr < eromend) { - memset(core, 0, sizeof(*core)); - INIT_LIST_HEAD(&core->list); - core->bus = bus; - - err = bcma_get_next_core(bus, &eromptr, match, core_num, core); - if (err == -ENODEV) { - core_num++; - continue; - } else if (err == -ENXIO) - continue; - else if (err == -ESPIPE) - break; - else if (err < 0) - goto out; - - core->core_index = core_num++; - bus->nr_cores++; - bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", - core->core_index, bcma_device_name(&core->id), - core->id.manuf, core->id.id, core->id.rev, - core->id.class); - - list_add_tail(&core->list, &bus->cores); - err = 0; - break; - } - -out: - if (bus->hosttype == BCMA_HOSTTYPE_SOC) - iounmap(eromptr); - - return err; -} diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index f24d245f8394..1b5fc0c3b1b5 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -5,8 +5,6 @@ struct bcma_soc { struct bcma_bus bus; - struct bcma_device core_cc; - struct bcma_device core_mips; }; int __init bcma_host_soc_register(struct bcma_soc *soc); -- cgit v1.2.3 From 872bf2fb69d90e3619befee842fc26db39d8e475 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 25 Jan 2015 16:59:35 +0200 Subject: net/mlx4_core: Maintain a persistent memory for mlx4 device Maintain persistent memory that should survive a reset flow or a PCI error. This comes as preparation for an upcoming series adding support for those flows. Signed-off-by: Yishai Hadas Signed-off-by: Or Gerlitz Signed-off-by: David S.
Miller --- drivers/infiniband/hw/mlx4/alias_GUID.c | 2 +- drivers/infiniband/hw/mlx4/mad.c | 3 +- drivers/infiniband/hw/mlx4/main.c | 17 ++-- drivers/infiniband/hw/mlx4/mr.c | 6 +- drivers/infiniband/hw/mlx4/sysfs.c | 6 +- drivers/net/ethernet/mellanox/mlx4/alloc.c | 15 +-- drivers/net/ethernet/mellanox/mlx4/catas.c | 13 +-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 46 +++++---- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 4 +- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_main.c | 4 +- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4 +- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4 +- drivers/net/ethernet/mellanox/mlx4/eq.c | 42 ++++---- drivers/net/ethernet/mellanox/mlx4/icm.c | 11 ++- drivers/net/ethernet/mellanox/mlx4/main.c | 106 +++++++++++++-------- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 9 +- drivers/net/ethernet/mellanox/mlx4/mr.c | 8 +- drivers/net/ethernet/mellanox/mlx4/pd.c | 6 +- drivers/net/ethernet/mellanox/mlx4/port.c | 17 ++-- drivers/net/ethernet/mellanox/mlx4/reset.c | 23 +++-- .../net/ethernet/mellanox/mlx4/resource_tracker.c | 36 ++++--- include/linux/mlx4/device.h | 11 ++- 24 files changed, 234 insertions(+), 163 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 0eb141c41416..a31e031afd87 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, continue; slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; - if (slave_id >= dev->dev->num_vfs + 1) + if (slave_id >= dev->dev->persist->num_vfs + 1) return; tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; form_cache_ag = get_cached_alias_guid(dev, port_num, diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 82a7dd87089b..c7619716c31d 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, ctx->ib_dev = &dev->ib_dev; for (i = 0; - i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1)); + i < min(dev->dev->caps.sqp_demux, + (u16)(dev->dev->persist->num_vfs + 1)); i++) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev->dev, i); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 57ecc5b204f3..b4fa6f658800 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; - props->vendor_part_id = dev->dev->pdev->device; + props->vendor_part_id = dev->dev->persist->pdev->device; props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); @@ -1375,7 +1375,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr, { struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev, ib_dev.dev); - return sprintf(buf, "MT%d\n", dev->dev->pdev->device); + return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device); } static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, @@ -1937,7 +1937,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) int i; if (mlx4_is_master(ibdev->dev)) { - for (slave = 0; slave <= 
ibdev->dev->num_vfs; ++slave) { + for (slave = 0; slave <= ibdev->dev->persist->num_vfs; + ++slave) { for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { for (i = 0; i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; @@ -1994,7 +1995,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { for (j = 0; j < eq_per_port; j++) { snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", - i, j, dev->pdev->bus->name); + i, j, dev->persist->pdev->bus->name); /* Set IRQ for specific name (per ring) */ if (mlx4_assign_eq(dev, name, NULL, &ibdev->eq_table[eq])) { @@ -2058,7 +2059,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); if (!ibdev) { - dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); + dev_err(&dev->persist->pdev->dev, + "Device struct alloc failed\n"); return NULL; } @@ -2085,7 +2087,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->num_ports = num_ports; ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; - ibdev->ib_dev.dma_device = &dev->pdev->dev; + ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; if (dev->caps.userspace_caps) ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; @@ -2236,7 +2238,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) sizeof(long), GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) { - dev_err(&dev->pdev->dev, "bit map alloc failed\n"); + dev_err(&dev->persist->pdev->dev, + "bit map alloc failed\n"); goto err_steer_qp_release; } diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index c36ccbd9a644..e0d271782d0a 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device if (!mfrpl->ibfrpl.page_list) goto err_free; - mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, + mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist-> + pdev->dev, size, &mfrpl->map, GFP_KERNEL); if (!mfrpl->mapped_page_list) @@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); int size = page_list->max_page_list_len * sizeof (u64); - dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list, + dma_free_coherent(&dev->dev->persist->pdev->dev, size, + mfrpl->mapped_page_list, mfrpl->map); kfree(mfrpl->ibfrpl.page_list); kfree(mfrpl); diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index cb4c66e723b5..d10c2b8a5dad 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max) char base_name[9]; /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */ - strlcpy(name, pci_name(dev->dev->pdev), max); + strlcpy(name, pci_name(dev->dev->persist->pdev), max); strncpy(base_name, name, 8); /*till xxxx:yy:*/ base_name[8] = '\0'; /* with no ARI only 3 last bits are used so when the fn is higher than 8 @@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device) if (!mlx4_is_master(device->dev)) return 0; - for (i = 0; i <= device->dev->num_vfs; ++i) + for (i = 0; i <= device->dev->persist->num_vfs; ++i) register_one_pkey_tree(device, i); return 0; @@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev 
*device) if (!mlx4_is_master(device->dev)) return; - for (slave = device->dev->num_vfs; slave >= 0; --slave) { + for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) { list_for_each_entry_safe(p, t, &device->pkeys.pkey_port_list[slave], entry) { diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 963dd7e6d547..a716c26e0d99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, buf->nbufs = 1; buf->npages = 1; buf->page_shift = get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, + buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev, size, &t, gfp); if (!buf->direct.buf) return -ENOMEM; @@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, for (i = 0; i < buf->nbufs; ++i) { buf->page_list[i].buf = - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, &t, gfp); if (!buf->page_list[i].buf) goto err_free; @@ -657,7 +658,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) int i; if (buf->nbufs == 1) - dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, + dma_free_coherent(&dev->persist->pdev->dev, size, + buf->direct.buf, buf->direct.map); else { if (BITS_PER_LONG == 64 && buf->direct.buf) @@ -665,7 +667,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) for (i = 0; i < buf->nbufs; ++i) if (buf->page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + dma_free_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, buf->page_list[i].buf, buf->page_list[i].map); kfree(buf->page_list); @@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) goto out; - pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp); + pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); if (!pgdir) { ret = -ENOMEM; goto out; @@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) set_bit(i, db->u.pgdir->bits[o]); if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, db->u.pgdir->db_page, db->u.pgdir->db_dma); list_del(&db->u.pgdir->list); kfree(db->u.pgdir); diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 9c656fe4983d..1a102c9bac99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -70,7 +70,7 @@ static void poll_catas(unsigned long dev_ptr) if (readl(priv->catas_err.map)) { /* If the device is off-line, we cannot try to recover it */ - if (pci_channel_offline(dev->pdev)) + if (pci_channel_offline(dev->persist->pdev)) mod_timer(&priv->catas_err.timer, round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); else { @@ -94,6 +94,7 @@ static void catas_reset(struct work_struct *work) { struct mlx4_priv *priv, *tmppriv; struct mlx4_dev *dev; + struct mlx4_dev_persistent *persist; LIST_HEAD(tlist); int ret; @@ -103,20 +104,20 @@ static void catas_reset(struct work_struct *work) spin_unlock_irq(&catas_lock); list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { - struct pci_dev *pdev = priv->dev.pdev; + struct pci_dev *pdev = priv->dev.persist->pdev; /* If the device is off-line, we 
cannot reset it */ if (pci_channel_offline(pdev)) continue; - ret = mlx4_restart_one(priv->dev.pdev); + ret = mlx4_restart_one(priv->dev.persist->pdev); /* 'priv' now is not valid */ if (ret) pr_err("mlx4 %s: Reset failed (%d)\n", pci_name(pdev), ret); else { - dev = pci_get_drvdata(pdev); - mlx4_dbg(dev, "Reset succeeded\n"); + persist = pci_get_drvdata(pdev); + mlx4_dbg(persist->dev, "Reset succeeded\n"); } } } @@ -134,7 +135,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) init_timer(&priv->catas_err.timer); priv->catas_err.map = NULL; - addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + + addr = pci_resource_start(dev->persist->pdev, priv->fw.catas_bar) + priv->fw.catas_offset; priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 5c93d1451c44..7cd90e6a4272 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -307,7 +307,7 @@ static int cmd_pending(struct mlx4_dev *dev) { u32 status; - if (pci_channel_offline(dev->pdev)) + if (pci_channel_offline(dev->persist->pdev)) return -EIO; status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); @@ -328,7 +328,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, mutex_lock(&cmd->hcr_mutex); - if (pci_channel_offline(dev->pdev)) { + if (pci_channel_offline(dev->persist->pdev)) { /* * Device is going through error recovery * and cannot accept commands. @@ -342,7 +342,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); while (cmd_pending(dev)) { - if (pci_channel_offline(dev->pdev)) { + if (pci_channel_offline(dev->persist->pdev)) { /* * Device is going through error recovery * and cannot accept commands. @@ -464,7 +464,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, down(&priv->cmd.poll_sem); - if (pci_channel_offline(dev->pdev)) { + if (pci_channel_offline(dev->persist->pdev)) { /* * Device is going through error recovery * and cannot accept commands. @@ -487,7 +487,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, end = msecs_to_jiffies(timeout) + jiffies; while (cmd_pending(dev) && time_before(jiffies, end)) { - if (pci_channel_offline(dev->pdev)) { + if (pci_channel_offline(dev->persist->pdev)) { /* * Device is going through error recovery * and cannot accept commands. 
@@ -612,7 +612,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, int native) { - if (pci_channel_offline(dev->pdev)) + if (pci_channel_offline(dev->persist->pdev)) return -EIO; if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { @@ -1997,11 +1997,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) if (mlx4_is_master(dev)) priv->mfunc.comm = - ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + + ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.comm_bar) + priv->fw.comm_base, MLX4_COMM_PAGESIZE); else priv->mfunc.comm = - ioremap(pci_resource_start(dev->pdev, 2) + + ioremap(pci_resource_start(dev->persist->pdev, 2) + MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); if (!priv->mfunc.comm) { mlx4_err(dev, "Couldn't map communication vector\n"); @@ -2107,9 +2108,9 @@ err_comm_admin: err_comm: iounmap(priv->mfunc.comm); err_vhcr: - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, - priv->mfunc.vhcr, - priv->mfunc.vhcr_dma); + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + priv->mfunc.vhcr, + priv->mfunc.vhcr_dma); priv->mfunc.vhcr = NULL; return -ENOMEM; } @@ -2130,8 +2131,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) } if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { - priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + - MLX4_HCR_BASE, MLX4_HCR_SIZE); + priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev, + 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); if (!priv->cmd.hcr) { mlx4_err(dev, "Couldn't map command register\n"); goto err; @@ -2140,7 +2141,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) } if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { - priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, + priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, &priv->mfunc.vhcr_dma, GFP_KERNEL); if (!priv->mfunc.vhcr) @@ -2150,7 +2152,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) } if (!priv->cmd.pool) { - priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, + priv->cmd.pool = pci_pool_create("mlx4_cmd", + dev->persist->pdev, MLX4_MAILBOX_SIZE, MLX4_MAILBOX_SIZE, 0); if (!priv->cmd.pool) @@ -2202,7 +2205,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) } if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, priv->mfunc.vhcr, priv->mfunc.vhcr_dma); priv->mfunc.vhcr = NULL; } @@ -2306,8 +2309,9 @@ u32 mlx4_comm_get_version(void) static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) { - if ((vf < 0) || (vf >= dev->num_vfs)) { - mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs); + if ((vf < 0) || (vf >= dev->persist->num_vfs)) { + mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", + vf, dev->persist->num_vfs); return -EINVAL; } @@ -2316,7 +2320,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) { - if (slave < 1 || slave > dev->num_vfs) { + if (slave < 1 || slave > dev->persist->num_vfs) { mlx4_err(dev, "Bad slave number:%d (number of activated slaves: %lu)\n", slave, dev->num_slaves); @@ -2388,7 +2392,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, if (port <= 0 || port > dev->caps.num_ports) return slaves_pport; - for (i = 0; i < dev->num_vfs + 1; i++) { + for (i = 0; i < dev->persist->num_vfs + 1; i++) { struct 
mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, i); if (test_bit(port - 1, actv_ports.ports)) @@ -2408,7 +2412,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); - for (i = 0; i < dev->num_vfs + 1; i++) { + for (i = 0; i < dev->persist->num_vfs + 1; i++) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, i); if (bitmap_equal(crit_ports->ports, actv_ports.ports, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 82322b1c8411..22da4d0d0f05 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, /* Allocate HW buffers on provided NUMA node. * dev->numa_node is used in mtt range allocation flow. */ - set_dev_node(&mdev->dev->pdev->dev, node); + set_dev_node(&mdev->dev->persist->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, cq->buf_size, 2 * PAGE_SIZE); - set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) goto err_cq; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 90e0f045a6bc..569eda9e83d6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) (u16) (mdev->dev->caps.fw_ver >> 32), (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), (u16) (mdev->dev->caps.fw_ver & 0xffff)); - strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), + strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->regdump_len = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 9f16f754137b..c643d2bbb7b9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -241,8 +241,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) spin_lock_init(&mdev->uar_lock); mdev->dev = dev; - mdev->dma_device = &(dev->pdev->dev); - mdev->pdev = dev->pdev; + mdev->dma_device = &dev->persist->pdev->dev; + mdev->pdev = dev->persist->pdev; mdev->device_up = false; mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d0d6dc1b8e46..43a3f9822f74 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2457,7 +2457,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, netif_set_real_num_tx_queues(dev, prof->tx_ring_num); netif_set_real_num_rx_queues(dev, prof->rx_ring_num); - SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); + SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); dev->dev_port = port - 1; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a0474eb94aa3..2ba5d368edce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -387,10 +387,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->rx_info, tmp); /* Allocate HW buffers on provided NUMA node */ - set_dev_node(&mdev->dev->pdev->dev, node); + set_dev_node(&mdev->dev->persist->pdev->dev, node); err = 
mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 2 * PAGE_SIZE); - set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) goto err_info; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 359bb1286eb5..55f9f5c5344e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); /* Allocate HW buffers on provided NUMA node */ - set_dev_node(&mdev->dev->pdev->dev, node); + set_dev_node(&mdev->dev->persist->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 2 * PAGE_SIZE); - set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) { en_err(priv, "Failed allocating hwq resources\n"); goto err_bounce; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 3d275fbaf0eb..7538c9ce98a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -237,7 +237,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) struct mlx4_eqe eqe; /*don't send if we don't have the that slave */ - if (dev->num_vfs < slave) + if (dev->persist->num_vfs < slave) return 0; memset(&eqe, 0, sizeof eqe); @@ -255,7 +255,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, struct mlx4_eqe eqe; /*don't send if we don't have the that slave */ - if (dev->num_vfs < slave) + if (dev->persist->num_vfs < slave) return 0; memset(&eqe, 0, sizeof eqe); @@ -310,7 +310,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, port); - for (i = 0; i < dev->num_vfs + 1; i++) + for (i = 0; i < dev->persist->num_vfs + 1; i++) if (test_bit(i, slaves_pport.slaves)) set_and_calc_slave_port_state(dev, i, port, event, &gen_event); @@ -560,7 +560,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_priv(dev)->sense.do_sense_port[port] = 1; if (!mlx4_is_master(dev)) break; - for (i = 0; i < dev->num_vfs + 1; i++) { + for (i = 0; i < dev->persist->num_vfs + 1; + i++) { if (!test_bit(i, slaves_port.slaves)) continue; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { @@ -596,7 +597,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) if (!mlx4_is_master(dev)) break; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - for (i = 0; i < dev->num_vfs + 1; i++) { + for (i = 0; + i < dev->persist->num_vfs + 1; + i++) { if (!test_bit(i, slaves_port.slaves)) continue; if (i == mlx4_master_func_num(dev)) @@ -865,7 +868,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) if (!priv->eq_table.uar_map[index]) { priv->eq_table.uar_map[index] = - ioremap(pci_resource_start(dev->pdev, 2) + + ioremap(pci_resource_start(dev->persist->pdev, 2) + ((eq->eqn / 4) << PAGE_SHIFT), PAGE_SIZE); if (!priv->eq_table.uar_map[index]) { @@ -928,8 +931,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, eq_context = mailbox->buf; for (i = 0; i < npages; ++i) { - eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, - PAGE_SIZE, &t, GFP_KERNEL); + eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> + pdev->dev, + PAGE_SIZE, 
&t, + GFP_KERNEL); if (!eq->page_list[i].buf) goto err_out_free_pages; @@ -995,7 +1000,7 @@ err_out_free_eq: err_out_free_pages: for (i = 0; i < npages; ++i) if (eq->page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, eq->page_list[i].map); @@ -1044,9 +1049,9 @@ static void mlx4_free_eq(struct mlx4_dev *dev, mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - eq->page_list[i].buf, - eq->page_list[i].map); + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); kfree(eq->page_list); mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); @@ -1060,7 +1065,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) int i, vec; if (eq_table->have_irq) - free_irq(dev->pdev->irq, dev); + free_irq(dev->persist->pdev->irq, dev); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) { @@ -1089,7 +1094,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + + priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.clr_int_bar) + priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); if (!priv->clr_base) { mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); @@ -1212,13 +1218,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) i * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE, "mlx4-comp-%d@pci:%s", i, - pci_name(dev->pdev)); + pci_name(dev->persist->pdev)); } else { snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE, "mlx4-async@pci:%s", - pci_name(dev->pdev)); + pci_name(dev->persist->pdev)); } eq_name = priv->eq_table.irq_names + @@ -1235,8 +1241,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE, DRV_NAME "@pci:%s", - pci_name(dev->pdev)); - err = request_irq(dev->pdev->irq, mlx4_interrupt, + pci_name(dev->persist->pdev)); + err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, IRQF_SHARED, priv->eq_table.irq_names, dev); if (err) goto err_out_async; diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 97c9b1db1d27..2a9dd460a95f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu int i; if (chunk->nsg > 0) - pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, + pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); for (i = 0; i < chunk->npages; ++i) @@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk * int i; for (i = 0; i < chunk->npages; ++i) - dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, + dma_free_coherent(&dev->persist->pdev->dev, + chunk->mem[i].length, lowmem_page_address(sg_page(&chunk->mem[i])), sg_dma_address(&chunk->mem[i])); } @@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, --cur_order; if (coherent) - ret = mlx4_alloc_icm_coherent(&dev->pdev->dev, + ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, &chunk->mem[chunk->npages], cur_order, gfp_mask); else @@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, if (coherent) ++chunk->nsg; else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { - 
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); @@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, } if (!coherent && chunk) { - chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 03e9eb0dc761..abcee61f8a47 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -318,10 +318,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) return -ENODEV; } - if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { + if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev_cap->uar_size, - (unsigned long long) pci_resource_len(dev->pdev, 2)); + (unsigned long long) + pci_resource_len(dev->persist->pdev, 2)); return -ENODEV; } @@ -541,8 +542,10 @@ static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; - err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1); - err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2); + err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, + &lnkcap1); + err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, + &lnkcap2); if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) *speed = PCIE_SPEED_8_0GT; @@ -587,7 +590,7 @@ static void mlx4_check_pcie_caps(struct mlx4_dev *dev) return; } - err = pcie_get_minimum_link(dev->pdev, &speed, &width); + err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); if (err || speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { mlx4_warn(dev, @@ -837,10 +840,12 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > - pci_resource_len(dev->pdev, 2)) { + pci_resource_len(dev->persist->pdev, + 2)) { mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev->caps.uar_page_size * dev->caps.num_uars, - (unsigned long long) pci_resource_len(dev->pdev, 2)); + (unsigned long long) + pci_resource_len(dev->persist->pdev, 2)); goto err_mem; } @@ -1492,9 +1497,9 @@ static int map_bf_area(struct mlx4_dev *dev) if (!dev->caps.bf_reg_size) return -ENXIO; - bf_start = pci_resource_start(dev->pdev, 2) + + bf_start = pci_resource_start(dev->persist->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); - bf_len = pci_resource_len(dev->pdev, 2) - + bf_len = pci_resource_len(dev->persist->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); if (!priv->bf_mapping) @@ -1536,7 +1541,8 @@ static int map_internal_clock(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); priv->clock_mapping = - ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) + + ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.clock_bar) + priv->fw.clock_offset, MLX4_CLOCK_SIZE); if (!priv->clock_mapping) @@ -1705,7 +1711,8 @@ static void choose_steering_mode(struct mlx4_dev *dev, if (mlx4_log_num_mgm_entry_size <= 0 && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || - 
(dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && + (dev_cap->fs_max_num_qp_per_entry >= + (dev->persist->num_vfs + 1))) && choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= MLX4_MIN_MGM_LOG_ENTRY_SIZE) { dev->oper_log_mgm_entry_size = @@ -2288,7 +2295,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) for (i = 0; i < nreq; ++i) entries[i].entry = i; - nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq); + nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, + nreq); if (nreq < 0) { kfree(entries); @@ -2316,7 +2324,7 @@ no_msi: dev->caps.comp_pool = 0; for (i = 0; i < 2; ++i) - priv->eq_table.eq[i].irq = dev->pdev->irq; + priv->eq_table.eq[i].irq = dev->persist->pdev->irq; } static int mlx4_init_port_info(struct mlx4_dev *dev, int port) @@ -2344,7 +2352,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->port_attr.show = show_port_type; sysfs_attr_init(&info->port_attr.attr); - err = device_create_file(&dev->pdev->dev, &info->port_attr); + err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); if (err) { mlx4_err(dev, "Failed to create file for port %d\n", port); info->port = -1; @@ -2361,10 +2369,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->port_mtu_attr.show = show_port_ib_mtu; sysfs_attr_init(&info->port_mtu_attr.attr); - err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); + err = device_create_file(&dev->persist->pdev->dev, + &info->port_mtu_attr); if (err) { mlx4_err(dev, "Failed to create mtu file for port %d\n", port); - device_remove_file(&info->dev->pdev->dev, &info->port_attr); + device_remove_file(&info->dev->persist->pdev->dev, + &info->port_attr); info->port = -1; } @@ -2376,8 +2386,9 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) if (info->port < 0) return; - device_remove_file(&info->dev->pdev->dev, &info->port_attr); - device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); + device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); + device_remove_file(&info->dev->persist->pdev->dev, + &info->port_mtu_attr); } static int mlx4_init_steering(struct mlx4_dev *dev) @@ -2444,10 +2455,11 @@ static int mlx4_get_ownership(struct mlx4_dev *dev) void __iomem *owner; u32 ret; - if (pci_channel_offline(dev->pdev)) + if (pci_channel_offline(dev->persist->pdev)) return -EIO; - owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, + owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_OWNER_BASE, MLX4_OWNER_SIZE); if (!owner) { mlx4_err(dev, "Failed to obtain ownership bit\n"); @@ -2463,10 +2475,11 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) { void __iomem *owner; - if (pci_channel_offline(dev->pdev)) + if (pci_channel_offline(dev->persist->pdev)) return; - owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, + owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_OWNER_BASE, MLX4_OWNER_SIZE); if (!owner) { mlx4_err(dev, "Failed to obtain ownership bit\n"); @@ -2514,13 +2527,13 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, dev_flags |= MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER; dev_flags &= ~MLX4_FLAG_SLAVE; - dev->num_vfs = total_vfs; + dev->persist->num_vfs = total_vfs; } return dev_flags; disable_sriov: atomic_dec(&pf_loading); - dev->num_vfs = 0; + dev->persist->num_vfs = 0; kfree(dev->dev_vfs); return dev_flags & ~MLX4_FLAG_MASTER; } @@ -2607,7 +2620,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 
existing_vfs = pci_num_vf(pdev); if (existing_vfs) dev->flags |= MLX4_FLAG_SRIOV; - dev->num_vfs = total_vfs; + dev->persist->num_vfs = total_vfs; } } @@ -2771,12 +2784,14 @@ slave_start: dev->caps.num_ports); goto err_close; } - memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs)); + memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); - for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) { + for (i = 0; + i < sizeof(dev->persist->nvfs)/ + sizeof(dev->persist->nvfs[0]); i++) { unsigned j; - for (j = 0; j < dev->nvfs[i]; ++sum, ++j) { + for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; dev->dev_vfs[sum].n_ports = i < 2 ? 1 : dev->caps.num_ports; @@ -2846,7 +2861,7 @@ slave_start: priv->removed = 0; - if (mlx4_is_master(dev) && dev->num_vfs) + if (mlx4_is_master(dev) && dev->persist->num_vfs) atomic_dec(&pf_loading); kfree(dev_cap); @@ -2908,7 +2923,7 @@ err_sriov: if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) pci_disable_sriov(pdev); - if (mlx4_is_master(dev) && dev->num_vfs) + if (mlx4_is_master(dev) && dev->persist->num_vfs) atomic_dec(&pf_loading); kfree(priv->dev.dev_vfs); @@ -3076,20 +3091,28 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; dev = &priv->dev; - dev->pdev = pdev; - pci_set_drvdata(pdev, dev); + dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); + if (!dev->persist) { + kfree(priv); + return -ENOMEM; + } + dev->persist->pdev = pdev; + dev->persist->dev = dev; + pci_set_drvdata(pdev, dev->persist); priv->pci_dev_data = id->driver_data; ret = __mlx4_init_one(pdev, id->driver_data, priv); - if (ret) + if (ret) { + kfree(dev->persist); kfree(priv); - + } return ret; } static void mlx4_unload_one(struct pci_dev *pdev) { - struct mlx4_dev *dev = pci_get_drvdata(pdev); + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); int pci_dev_data; int p; @@ -3155,7 +3178,7 @@ static void mlx4_unload_one(struct pci_dev *pdev) mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); dev->flags &= ~MLX4_FLAG_SRIOV; - dev->num_vfs = 0; + dev->persist->num_vfs = 0; } if (!mlx4_is_slave(dev)) @@ -3175,26 +3198,29 @@ static void mlx4_unload_one(struct pci_dev *pdev) static void mlx4_remove_one(struct pci_dev *pdev) { - struct mlx4_dev *dev = pci_get_drvdata(pdev); + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); mlx4_unload_one(pdev); pci_release_regions(pdev); pci_disable_device(pdev); + kfree(dev->persist); kfree(priv); pci_set_drvdata(pdev, NULL); } int mlx4_restart_one(struct pci_dev *pdev) { - struct mlx4_dev *dev = pci_get_drvdata(pdev); + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; int pci_dev_data, err, total_vfs; pci_dev_data = priv->pci_dev_data; - total_vfs = dev->num_vfs; - memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs)); + total_vfs = dev->persist->num_vfs; + memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mlx4_unload_one(pdev); err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index bdd4eea2247c..faa37ab75a9d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ 
-221,16 +221,17 @@ extern int mlx4_debug_level; #define mlx4_dbg(mdev, format, ...) \ do { \ if (mlx4_debug_level) \ - dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ + dev_printk(KERN_DEBUG, \ + &(mdev)->persist->pdev->dev, format, \ ##__VA_ARGS__); \ } while (0) #define mlx4_err(mdev, format, ...) \ - dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) #define mlx4_info(mdev, format, ...) \ - dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) #define mlx4_warn(mdev, format, ...) \ - dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) extern int mlx4_log_num_mgm_entry_size; extern int log_mtts_per_seg; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 7094a9c70fd5..8dbdf1d29357 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -708,13 +708,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, if (!mtts) return -ENOMEM; - dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, + dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); - dma_sync_single_for_device(&dev->pdev->dev, dma_handle, + dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); return 0; @@ -1020,13 +1020,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list /* Make sure MPT status is visible before writing MTT entries */ wmb(); - dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, + dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle, npages * sizeof(u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); - dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, + dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle, npages * sizeof(u64), DMA_TO_DEVICE); fmr->mpt->key = cpu_to_be32(key); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 74216071201f..a42b4c0a9ed9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) return -ENOMEM; if (mlx4_is_slave(dev)) - offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / + offset = uar->index % ((int)pci_resource_len(dev->persist->pdev, + 2) / dev->caps.uar_page_size); else offset = uar->index; - uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; + uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT) + + offset; uar->map = NULL; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 30eb1ead0fe6..9f268f05290a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( dev, &exclusive_ports); slave_gid -= bitmap_weight(slaves_pport_actv.slaves, - dev->num_vfs + 1); + dev->persist->num_vfs + 1); } - vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + vfs = 
bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; @@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( dev, &exclusive_ports); slave_gid -= bitmap_weight(slaves_pport_actv.slaves, - dev->num_vfs + 1); + dev->persist->num_vfs + 1); } gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; - vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; if (slave_gid <= gids % vfs) return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); @@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) int num_eth_ports, err; int i; - if (slave < 0 || slave > dev->num_vfs) + if (slave < 0 || slave > dev->persist->num_vfs) return; actv_ports = mlx4_get_active_ports(dev, slave); @@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, return -EINVAL; slaves_pport = mlx4_phys_to_slaves_pport(dev, port); - num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + num_vfs = bitmap_weight(slaves_pport.slaves, + dev->persist->num_vfs + 1) - 1; for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, @@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, dev, &exclusive_ports); num_vfs_before += bitmap_weight( slaves_pport_actv.slaves, - dev->num_vfs + 1); + dev->persist->num_vfs + 1); } /* candidate_slave_gid isn't necessarily the correct slave, but @@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, dev, &exclusive_ports); slave_gid += bitmap_weight( slaves_pport_actv.slaves, - dev->num_vfs + 1); + dev->persist->num_vfs + 1); } } *slave_id = slave_gid; diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c index ea1c6d092145..0076d88587ca 100644 --- a/drivers/net/ethernet/mellanox/mlx4/reset.c +++ b/drivers/net/ethernet/mellanox/mlx4/reset.c @@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev) goto out; } - pcie_cap = pci_pcie_cap(dev->pdev); + pcie_cap = pci_pcie_cap(dev->persist->pdev); for (i = 0; i < 64; ++i) { if (i == 22 || i == 23) continue; - if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { + if (pci_read_config_dword(dev->persist->pdev, i * 4, + hca_header + i)) { err = -ENODEV; mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); goto out; } } - reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, + reset = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_RESET_BASE, MLX4_RESET_SIZE); if (!reset) { err = -ENOMEM; @@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev) end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; do { - if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && - vendor != 0xffff) + if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID, + &vendor) && vendor != 0xffff) break; msleep(1); @@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev) /* Now restore the PCI headers */ if (pcie_cap) { devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; - if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, + if (pcie_capability_write_word(dev->persist->pdev, + PCI_EXP_DEVCTL, devctl)) { err = -ENODEV; mlx4_err(dev, 
"Couldn't restore HCA PCI Express Device Control register, aborting\n"); goto out; } linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; - if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, + if (pcie_capability_write_word(dev->persist->pdev, + PCI_EXP_LNKCTL, linkctl)) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); @@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev) if (i * 4 == PCI_COMMAND) continue; - if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { + if (pci_write_config_dword(dev->persist->pdev, i * 4, + hca_header[i])) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", i); @@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev) } } - if (pci_write_config_dword(dev->pdev, PCI_COMMAND, + if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND, hca_header[PCI_COMMAND / 4])) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 4efbd1eca611..3e93879bccce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, int allocated, free, reserved, guaranteed, from_free; int from_rsvd; - if (slave > dev->num_vfs) + if (slave > dev->persist->num_vfs) return -EINVAL; spin_lock(&res_alloc->alloc_lock); allocated = (port > 0) ? - res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] : res_alloc->allocated[slave]; free = (port > 0) ? res_alloc->res_port_free[port - 1] : res_alloc->res_free; @@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, if (!err) { /* grant the request */ if (port > 0) { - res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] += count; res_alloc->res_port_free[port - 1] -= count; res_alloc->res_port_rsvd[port - 1] -= from_rsvd; } else { @@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, &priv->mfunc.master.res_tracker.res_alloc[res_type]; int allocated, guaranteed, from_rsvd; - if (slave > dev->num_vfs) + if (slave > dev->persist->num_vfs) return; spin_lock(&res_alloc->alloc_lock); allocated = (port > 0) ? 
- res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] : res_alloc->allocated[slave]; guaranteed = res_alloc->guaranteed[slave]; @@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, } if (port > 0) { - res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] -= count; res_alloc->res_port_free[port - 1] += count; res_alloc->res_port_rsvd[port - 1] += from_rsvd; } else { @@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev, enum mlx4_resource res_type, int vf, int num_instances) { - res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1)); + res_alloc->guaranteed[vf] = num_instances / + (2 * (dev->persist->num_vfs + 1)); res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; if (vf == mlx4_master_func_num(dev)) { res_alloc->res_free = num_instances; @@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { struct resource_allocator *res_alloc = &priv->mfunc.master.res_tracker.res_alloc[i]; - res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); - res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); + res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) * + sizeof(int), GFP_KERNEL); + res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) * + sizeof(int), GFP_KERNEL); if (i == RES_MAC || i == RES_VLAN) res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * - (dev->num_vfs + 1) * sizeof(int), - GFP_KERNEL); + (dev->persist->num_vfs + + 1) * + sizeof(int), GFP_KERNEL); else - res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); + res_alloc->allocated = kzalloc((dev->persist-> + num_vfs + 1) * + sizeof(int), GFP_KERNEL); if (!res_alloc->quota || !res_alloc->guaranteed || !res_alloc->allocated) goto no_mem_err; spin_lock_init(&res_alloc->alloc_lock); - for (t = 0; t < dev->num_vfs + 1; t++) { + for (t = 0; t < dev->persist->num_vfs + 1; t++) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, t); switch (i) { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index f1e41b33462f..1069ce65e8b4 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -744,8 +744,15 @@ struct mlx4_vf_dev { u8 n_ports; }; -struct mlx4_dev { +struct mlx4_dev_persistent { struct pci_dev *pdev; + struct mlx4_dev *dev; + int nvfs[MLX4_MAX_PORTS + 1]; + int num_vfs; +}; + +struct mlx4_dev { + struct mlx4_dev_persistent *persist; unsigned long flags; unsigned long num_slaves; struct mlx4_caps caps; @@ -754,13 +761,11 @@ struct mlx4_dev { struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; - int num_vfs; int numa_node; int oper_log_mgm_entry_size; u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; struct mlx4_vf_dev *dev_vfs; - int nvfs[MLX4_MAX_PORTS + 1]; }; struct mlx4_eqe { -- cgit v1.2.3 From dd0eefe3abbf47442db296bf68f27eb2860c1cdf Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 25 Jan 2015 16:59:36 +0200 Subject: net/mlx4_core: Set device configuration data to be persistent across reset When an HCA enters an internal error state, this is detected by the driver. The driver then should reset the HCA and restart the software stack. 
Keep ports information and some SRIOV configuration in a persistent area to have it valid across reset. Signed-off-by: Yishai Hadas Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 45 +++++++++++++++++++++++++++++-- include/linux/mlx4/device.h | 2 ++ 2 files changed, 45 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index abcee61f8a47..2c5a555dff89 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3109,18 +3109,34 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return ret; } +static void mlx4_clean_dev(struct mlx4_dev *dev) +{ + struct mlx4_dev_persistent *persist = dev->persist; + struct mlx4_priv *priv = mlx4_priv(dev); + + memset(priv, 0, sizeof(*priv)); + priv->dev.persist = persist; +} + static void mlx4_unload_one(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); int pci_dev_data; - int p; + int p, i; int active_vfs = 0; if (priv->removed) return; + /* saving current ports type for further use */ + for (i = 0; i < dev->caps.num_ports; i++) { + dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; + dev->persist->curr_port_poss_type[i] = dev->caps. + possible_type[i + 1]; + } + pci_dev_data = priv->pci_dev_data; /* Disabling SR-IOV is not allowed while there are active vf's */ @@ -3191,7 +3207,7 @@ static void mlx4_unload_one(struct pci_dev *pdev) kfree(dev->caps.qp1_proxy); kfree(dev->dev_vfs); - memset(priv, 0, sizeof(*priv)); + mlx4_clean_dev(dev); priv->pci_dev_data = pci_dev_data; priv->removed = 1; } @@ -3210,6 +3226,25 @@ static void mlx4_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +static int restore_current_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *types, + enum mlx4_port_type *poss_types) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err, i; + + mlx4_stop_sense(dev); + + mutex_lock(&priv->port_mutex); + for (i = 0; i < dev->caps.num_ports; i++) + dev->caps.possible_type[i + 1] = poss_types[i]; + err = mlx4_change_port_types(dev, types); + mlx4_start_sense(dev); + mutex_unlock(&priv->port_mutex); + + return err; +} + int mlx4_restart_one(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); @@ -3230,6 +3265,12 @@ int mlx4_restart_one(struct pci_dev *pdev) return err; } + err = restore_current_port_types(dev, dev->persist->curr_port_type, + dev->persist->curr_port_poss_type); + if (err) + mlx4_err(dev, "could not restore original port types (%d)\n", + err); + return err; } diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 1069ce65e8b4..8c3837ac1a2d 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -749,6 +749,8 @@ struct mlx4_dev_persistent { struct mlx4_dev *dev; int nvfs[MLX4_MAX_PORTS + 1]; int num_vfs; + enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1]; + enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1]; }; struct mlx4_dev { -- cgit v1.2.3 From ad9a0bf08ffbf32b8f292c3bb78ca0f24bb8f6b2 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 25 Jan 2015 16:59:37 +0200 Subject: net/mlx4_core: Refactor the catas flow to work per device Using a WQ per device instead of a single global WQ, this allows independent reset handling per device even when SRIOV is used. 
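The essence of the change is visible in the diff below: the work item is embedded in the per-device persistent structure, so the handler recovers its owning device with container_of() and the old global catas_list/catas_lock pair becomes unnecessary. A minimal sketch of that shape, using hypothetical my_dev names rather than the driver's own code:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

struct my_dev {
	struct work_struct reset_work;	/* embedded: one work item per device */
	struct workqueue_struct *wq;	/* private queue instead of a global one */
};

static void my_reset_handler(struct work_struct *work)
{
	/* The embedded work item identifies the device; no global list needed. */
	struct my_dev *dev = container_of(work, struct my_dev, reset_work);

	pr_info("resetting device %p\n", dev);
	/* ... reset and restart this device only ... */
}

static int my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->reset_work, my_reset_handler);
	dev->wq = create_singlethread_workqueue("my_dev_reset");
	return dev->wq ? 0 : -ENOMEM;
}

static void my_dev_report_error(struct my_dev *dev)
{
	/* Each device queues onto its own workqueue, so one device's reset
	 * never waits behind another device's reset.
	 */
	queue_work(dev->wq, &dev->reset_work);
}

static void my_dev_fini(struct my_dev *dev)
{
	destroy_workqueue(dev->wq);	/* completes any pending reset work first */
}

This is the same pattern as mlx4_catas_init(), catas_reset() and mlx4_catas_end() in the patch.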
This comes as a pre-patch for supporting chip reset for both native and SRIOV. Signed-off-by: Yishai Hadas Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/catas.c | 75 ++++++++++++++---------------- drivers/net/ethernet/mellanox/mlx4/main.c | 12 ++++- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 +- include/linux/mlx4/device.h | 2 + 4 files changed, 48 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 1a102c9bac99..5bb9aa6e281d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -40,10 +40,7 @@ enum { MLX4_CATAS_POLL_INTERVAL = 5 * HZ, }; -static DEFINE_SPINLOCK(catas_lock); -static LIST_HEAD(catas_list); -static struct work_struct catas_work; static int internal_err_reset = 1; module_param(internal_err_reset, int, 0644); @@ -77,13 +74,9 @@ static void poll_catas(unsigned long dev_ptr) dump_err_buf(dev); mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); - if (internal_err_reset) { - spin_lock(&catas_lock); - list_add(&priv->catas_err.list, &catas_list); - spin_unlock(&catas_lock); - - queue_work(mlx4_wq, &catas_work); - } + if (internal_err_reset) + queue_work(dev->persist->catas_wq, + &dev->persist->catas_work); } } else mod_timer(&priv->catas_err.timer, @@ -92,34 +85,23 @@ static void poll_catas(unsigned long dev_ptr) static void catas_reset(struct work_struct *work) { - struct mlx4_priv *priv, *tmppriv; - struct mlx4_dev *dev; - struct mlx4_dev_persistent *persist; - - LIST_HEAD(tlist); + struct mlx4_dev_persistent *persist = + container_of(work, struct mlx4_dev_persistent, + catas_work); + struct pci_dev *pdev = persist->pdev; int ret; - spin_lock_irq(&catas_lock); - list_splice_init(&catas_list, &tlist); - spin_unlock_irq(&catas_lock); - - list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { - struct pci_dev *pdev = priv->dev.persist->pdev; - - /* If the device is off-line, we cannot reset it */ - if (pci_channel_offline(pdev)) - continue; + /* If the device is off-line, we cannot reset it */ + if (pci_channel_offline(pdev)) + return; - ret = mlx4_restart_one(priv->dev.persist->pdev); - /* 'priv' now is not valid */ - if (ret) - pr_err("mlx4 %s: Reset failed (%d)\n", - pci_name(pdev), ret); - else { - persist = pci_get_drvdata(pdev); - mlx4_dbg(persist->dev, "Reset succeeded\n"); - } - } + ret = mlx4_restart_one(pdev); + /* 'priv' now is not valid */ + if (ret) + pr_err("mlx4 %s: Reset failed (%d)\n", + pci_name(pdev), ret); + else + mlx4_dbg(persist->dev, "Reset succeeded\n"); } void mlx4_start_catas_poll(struct mlx4_dev *dev) @@ -158,15 +140,26 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev) del_timer_sync(&priv->catas_err.timer); - if (priv->catas_err.map) + if (priv->catas_err.map) { iounmap(priv->catas_err.map); + priv->catas_err.map = NULL; + } +} - spin_lock_irq(&catas_lock); - list_del(&priv->catas_err.list); - spin_unlock_irq(&catas_lock); +int mlx4_catas_init(struct mlx4_dev *dev) +{ + INIT_WORK(&dev->persist->catas_work, catas_reset); + dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health"); + if (!dev->persist->catas_wq) + return -ENOMEM; + + return 0; } -void __init mlx4_catas_init(void) +void mlx4_catas_end(struct mlx4_dev *dev) { - INIT_WORK(&catas_work, catas_reset); + if (dev->persist->catas_wq) { + destroy_workqueue(dev->persist->catas_wq); + dev->persist->catas_wq = NULL; + } } diff --git 
a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2c5a555dff89..a61694cc1476 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3064,11 +3064,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, } } - err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); + err = mlx4_catas_init(&priv->dev); if (err) goto err_release_regions; + + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); + if (err) + goto err_catas; + return 0; +err_catas: + mlx4_catas_end(&priv->dev); + err_release_regions: pci_release_regions(pdev); @@ -3219,6 +3227,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct mlx4_priv *priv = mlx4_priv(dev); mlx4_unload_one(pdev); + mlx4_catas_end(dev); pci_release_regions(pdev); pci_disable_device(pdev); kfree(dev->persist); @@ -3403,7 +3412,6 @@ static int __init mlx4_init(void) if (mlx4_verify_params()) return -EINVAL; - mlx4_catas_init(); mlx4_wq = create_singlethread_workqueue("mlx4"); if (!mlx4_wq) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index faa37ab75a9d..d41af84f965b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -995,7 +995,8 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); -void mlx4_catas_init(void); +int mlx4_catas_init(struct mlx4_dev *dev); +void mlx4_catas_end(struct mlx4_dev *dev); int mlx4_restart_one(struct pci_dev *pdev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 8c3837ac1a2d..da425d2f3708 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -751,6 +751,8 @@ struct mlx4_dev_persistent { int num_vfs; enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1]; enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1]; + struct work_struct catas_work; + struct workqueue_struct *catas_wq; }; struct mlx4_dev { -- cgit v1.2.3 From f6bc11e42646e661e699a5593cbd1e9dba7191d0 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 25 Jan 2015 16:59:38 +0200 Subject: net/mlx4_core: Enhance the catas flow to support device reset This includes: - resetting the chip when a fatal error is detected (the current code does not do this). - exposing the ability to enter error state from outside the catas code by calling its functionality. (E.g. FW Command timeout, AER error). - managing a persistent device state. This is needed to sync between reset flow cases. Signed-off-by: Yishai Hadas Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/catas.c | 122 ++++++++++++++++++++++------- drivers/net/ethernet/mellanox/mlx4/main.c | 6 ++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 +- include/linux/mlx4/device.h | 7 ++ 4 files changed, 108 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 5bb9aa6e281d..588d6b5e1211 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -48,6 +48,83 @@ MODULE_PARM_DESC(internal_err_reset, "Reset device on internal errors if non-zero" " (default 1, in SRIOV mode default is 0)"); +static int read_vendor_id(struct mlx4_dev *dev) +{ + u16 vendor_id = 0; + int ret; + + ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id); + if (ret) { + mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret); + return ret; + } + + if (vendor_id == 0xffff) { + mlx4_err(dev, "PCI can't be accessed to read vendor id\n"); + return -EINVAL; + } + + return 0; +} + +static int mlx4_reset_master(struct mlx4_dev *dev) +{ + int err = 0; + + if (!pci_channel_offline(dev->persist->pdev)) { + err = read_vendor_id(dev); + /* If PCI can't be accessed to read vendor ID we assume that its + * link was disabled and chip was already reset. + */ + if (err) + return 0; + + err = mlx4_reset(dev); + if (err) + mlx4_err(dev, "Fail to reset HCA\n"); + } + + return err; +} + +void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) +{ + int err; + struct mlx4_dev *dev; + + if (!internal_err_reset) + return; + + mutex_lock(&persist->device_state_mutex); + if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + goto out; + + dev = persist->dev; + mlx4_err(dev, "device is going to be reset\n"); + err = mlx4_reset_master(dev); + BUG_ON(err != 0); + + dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; + mlx4_err(dev, "device was reset successfully\n"); + mutex_unlock(&persist->device_state_mutex); + + /* At that step HW was already reset, now notify clients */ + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); + return; + +out: + mutex_unlock(&persist->device_state_mutex); +} + +static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) +{ + int err = 0; + + mlx4_enter_error_state(persist); + err = mlx4_restart_one(persist->pdev); + mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", err); +} + static void dump_err_buf(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -66,21 +143,22 @@ static void poll_catas(unsigned long dev_ptr) struct mlx4_priv *priv = mlx4_priv(dev); if (readl(priv->catas_err.map)) { - /* If the device is off-line, we cannot try to recover it */ - if (pci_channel_offline(dev->persist->pdev)) - mod_timer(&priv->catas_err.timer, - round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); - else { - dump_err_buf(dev); - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); - - if (internal_err_reset) - queue_work(dev->persist->catas_wq, - &dev->persist->catas_work); - } - } else - mod_timer(&priv->catas_err.timer, - round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); + dump_err_buf(dev); + goto internal_err; + } + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + mlx4_warn(dev, "Internal error mark was detected on device\n"); + goto internal_err; + } + + mod_timer(&priv->catas_err.timer, + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); + return; + +internal_err: + if (internal_err_reset) + 
queue_work(dev->persist->catas_wq, &dev->persist->catas_work); } static void catas_reset(struct work_struct *work) @@ -88,20 +166,8 @@ static void catas_reset(struct work_struct *work) struct mlx4_dev_persistent *persist = container_of(work, struct mlx4_dev_persistent, catas_work); - struct pci_dev *pdev = persist->pdev; - int ret; - - /* If the device is off-line, we cannot reset it */ - if (pci_channel_offline(pdev)) - return; - ret = mlx4_restart_one(pdev); - /* 'priv' now is not valid */ - if (ret) - pr_err("mlx4 %s: Reset failed (%d)\n", - pci_name(pdev), ret); - else - mlx4_dbg(persist->dev, "Reset succeeded\n"); + mlx4_handle_error_state(persist); } void mlx4_start_catas_poll(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a61694cc1476..dc2d910fcc88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2624,6 +2624,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, } } + /* on load remove any previous indication of internal error, + * device is up. + */ + dev->persist->state = MLX4_DEVICE_STATE_UP; + slave_start: err = mlx4_cmd_init(dev); if (err) { @@ -3108,6 +3113,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) dev->persist->dev = dev; pci_set_drvdata(pdev, dev->persist); priv->pci_dev_data = id->driver_data; + mutex_init(&dev->persist->device_state_mutex); ret = __mlx4_init_one(pdev, id->driver_data, priv); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index d41af84f965b..aa1ecbc5a606 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1178,7 +1178,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); -void mlx4_handle_catas_err(struct mlx4_dev *dev); +void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index da425d2f3708..7d5d317cb7a6 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -411,6 +411,11 @@ enum { MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, }; +enum { + MLX4_DEVICE_STATE_UP = 1 << 0, + MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1, +}; + #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) @@ -753,6 +758,8 @@ struct mlx4_dev_persistent { enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1]; struct work_struct catas_work; struct workqueue_struct *catas_wq; + struct mutex device_state_mutex; /* protect HW state */ + u8 state; }; struct mlx4_dev { -- cgit v1.2.3 From f5aef5aa35063f2b45c3605871cd525d0cb7fb7a Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 25 Jan 2015 16:59:39 +0200 Subject: net/mlx4_core: Activate reset flow upon fatal command cases We activate reset flow upon command fatal errors, when the device enters an erroneous state, and must be reset. The cases below are assumed to be fatal: FW command timed-out, an error from FW on closing commands, pci is offline when posting/pending a command. In those cases we place the device into an error state: chip is reset, pending commands are awakened and completed immediately. Subsequent commands will return immediately. 
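As a hedged illustration of the per-command policy the message goes on to describe (this sketch is not part of the patch; the authoritative logic is mlx4_internal_err_ret_value() in the cmd.c hunk further below):

	/* Sketch only: once the device is in internal-error state, commands
	 * that free or close resources report success so callers can safely
	 * release their kernel resources; everything else fails with -EIO.
	 */
	static int internal_err_ret_value_sketch(u16 op)
	{
		switch (op) {
		case MLX4_CMD_CLOSE_HCA:	/* closing/freeing commands */
		case MLX4_CMD_FREE_RES:
		case MLX4_CMD_CLOSE_PORT:
			return 0;		/* report success */
		default:
			return -EIO;		/* all other commands fail */
		}
	}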
The return code in the above cases will depend on the command. Commands
which free and close resources will return success (because the chip was
reset, so callers may safely free their kernel resources). Other commands
will return -EIO. Since the device's state was marked as error, the catas
poller will detect this and restart the device's software stack (as is
done when a FW internal error is directly detected). The device state is
protected by a persistent mutex that lives on its mlx4_dev; the hcr_mutex
is therefore no longer needed and is removed.

Signed-off-by: Yishai Hadas
Signed-off-by: Or Gerlitz
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx4/catas.c |  11 +-
 drivers/net/ethernet/mellanox/mlx4/cmd.c   | 163 +++++++++++++++++++++++++----
 drivers/net/ethernet/mellanox/mlx4/mcg.c   |   3 +
 drivers/net/ethernet/mellanox/mlx4/mlx4.h  |   2 +-
 include/linux/mlx4/cmd.h                   |   1 +
 5 files changed, 153 insertions(+), 27 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 588d6b5e1211..63f14ffcc906 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -42,8 +42,8 @@ enum {
-static int internal_err_reset = 1;
-module_param(internal_err_reset, int, 0644);
+int mlx4_internal_err_reset = 1;
+module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
		 "Reset device on internal errors if non-zero"
		 " (default 1, in SRIOV mode default is 0)");
@@ -92,7 +92,7 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
 	int err;
 	struct mlx4_dev *dev;
-	if (!internal_err_reset)
+	if (!mlx4_internal_err_reset)
 		return;
 	mutex_lock(&persist->device_state_mutex);
@@ -110,6 +110,7 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
 	/* At that step HW was already reset, now notify clients */
 	mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+	mlx4_cmd_wake_completions(dev);
 	return;
 out:
@@ -157,7 +158,7 @@ static void poll_catas(unsigned long dev_ptr)
 	return;
 internal_err:
-	if (internal_err_reset)
+	if (mlx4_internal_err_reset)
 		queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
 }
@@ -177,7 +178,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
 	/*If we are in SRIOV the default of the module param must be 0*/
 	if (mlx4_is_mfunc(dev))
-		internal_err_reset = 0;
+		mlx4_internal_err_reset = 0;
 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 7cd90e6a4272..3895b2b5fc92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -182,6 +182,72 @@ static u8 mlx4_errno_to_status(int errno)
 	}
 }
+static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
+				       u8 op_modifier)
+{
+	switch (op) {
+	case MLX4_CMD_UNMAP_ICM:
+	case MLX4_CMD_UNMAP_ICM_AUX:
+	case MLX4_CMD_UNMAP_FA:
+	case MLX4_CMD_2RST_QP:
+	case MLX4_CMD_HW2SW_EQ:
+	case MLX4_CMD_HW2SW_CQ:
+	case MLX4_CMD_HW2SW_SRQ:
+	case MLX4_CMD_HW2SW_MPT:
+	case MLX4_CMD_CLOSE_HCA:
+	case MLX4_QP_FLOW_STEERING_DETACH:
+	case MLX4_CMD_FREE_RES:
+	case MLX4_CMD_CLOSE_PORT:
+		return CMD_STAT_OK;
+
+	case MLX4_CMD_QP_ATTACH:
+		/* On Detach case return success */
+		if (op_modifier == 0)
+			return CMD_STAT_OK;
+		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+
+	default:
+		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+	}
+}
+
+static 
int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status) +{ + /* Any error during the closing commands below is considered fatal */ + if (op == MLX4_CMD_CLOSE_HCA || + op == MLX4_CMD_HW2SW_EQ || + op == MLX4_CMD_HW2SW_CQ || + op == MLX4_CMD_2RST_QP || + op == MLX4_CMD_HW2SW_SRQ || + op == MLX4_CMD_SYNC_TPT || + op == MLX4_CMD_UNMAP_ICM || + op == MLX4_CMD_UNMAP_ICM_AUX || + op == MLX4_CMD_UNMAP_FA) + return 1; + /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals + * CMD_STAT_REG_BOUND. + * This status indicates that memory region has memory windows bound to it + * which may result from invalid user space usage and is not fatal. + */ + if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND) + return 1; + return 0; +} + +static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier, + int err) +{ + /* Only if reset flow is really active return code is based on + * command, otherwise current error code is returned. + */ + if (mlx4_internal_err_reset) { + mlx4_enter_error_state(dev->persist); + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + } + + return err; +} + static int comm_pending(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -258,7 +324,7 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, cmd->free_head = context->next; spin_unlock(&cmd->context_lock); - init_completion(&context->done); + reinit_completion(&context->done); mlx4_comm_cmd_post(dev, op, param); @@ -323,17 +389,21 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, { struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; u32 __iomem *hcr = cmd->hcr; - int ret = -EAGAIN; + int ret = -EIO; unsigned long end; - mutex_lock(&cmd->hcr_mutex); - - if (pci_channel_offline(dev->persist->pdev)) { + mutex_lock(&dev->persist->device_state_mutex); + /* To avoid writing to unknown addresses after the device state was + * changed to internal error and the chip was reset, + * check the INTERNAL_ERROR flag which is updated under + * device_state_mutex lock. + */ + if (pci_channel_offline(dev->persist->pdev) || + (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { /* * Device is going through error recovery * and cannot accept commands. */ - ret = -EIO; goto out; } @@ -347,7 +417,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, * Device is going through error recovery * and cannot accept commands. */ - ret = -EIO; goto out; } @@ -391,7 +460,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, ret = 0; out: - mutex_unlock(&cmd->hcr_mutex); + if (ret) + mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n", + op, ret, in_param, in_modifier, op_modifier); + mutex_unlock(&dev->persist->device_state_mutex); + return ret; } @@ -464,12 +537,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, down(&priv->cmd.poll_sem); - if (pci_channel_offline(dev->persist->pdev)) { + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { /* * Device is going through error recovery * and cannot accept commands. */ - err = -EIO; + err = mlx4_internal_err_ret_value(dev, op, op_modifier); goto out; } @@ -483,7 +556,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, err = mlx4_cmd_post(dev, in_param, out_param ? 
*out_param : 0, in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); if (err) - goto out; + goto out_reset; end = msecs_to_jiffies(timeout) + jiffies; while (cmd_pending(dev) && time_before(jiffies, end)) { @@ -493,6 +566,11 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, * and cannot accept commands. */ err = -EIO; + goto out_reset; + } + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + err = mlx4_internal_err_ret_value(dev, op, op_modifier); goto out; } @@ -502,8 +580,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, if (cmd_pending(dev)) { mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op); - err = -ETIMEDOUT; - goto out; + err = -EIO; + goto out_reset; } if (out_is_imm) @@ -515,10 +593,17 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, stat = be32_to_cpu((__force __be32) __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; err = mlx4_status_to_errno(stat); - if (err) + if (err) { mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", op, stat); + if (mlx4_closing_cmd_fatal_error(op, stat)) + goto out_reset; + goto out; + } +out_reset: + if (err) + err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); out: up(&priv->cmd.poll_sem); return err; @@ -565,17 +650,19 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, goto out; } - init_completion(&context->done); + reinit_completion(&context->done); - mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, - in_modifier, op_modifier, op, context->token, 1); + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, + in_modifier, op_modifier, op, context->token, 1); + if (err) + goto out_reset; if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op); - err = -EBUSY; - goto out; + err = -EIO; + goto out_reset; } err = context->result; @@ -592,12 +679,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, else mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", op, context->fw_status); + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + else if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) + goto out_reset; + goto out; } if (out_is_imm) *out_param = context->out_param; +out_reset: + if (err) + err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); out: spin_lock(&cmd->context_lock); context->next = cmd->free_head; @@ -613,9 +708,12 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, u16 op, unsigned long timeout, int native) { if (pci_channel_offline(dev->persist->pdev)) - return -EIO; + return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO); if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + return mlx4_internal_err_ret_value(dev, op, + op_modifier); if (mlx4_priv(dev)->cmd.use_events) return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, in_modifier, @@ -2121,7 +2219,6 @@ int mlx4_cmd_init(struct mlx4_dev *dev) int flags = 0; if (!priv->cmd.initialized) { - mutex_init(&priv->cmd.hcr_mutex); mutex_init(&priv->cmd.slave_cmd_mutex); sema_init(&priv->cmd.poll_sem, 1); priv->cmd.use_events = 0; @@ -2232,6 +2329,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) for (i = 0; i < priv->cmd.max_cmds; ++i) { priv->cmd.context[i].token = i; priv->cmd.context[i].next = i + 1; + /* To support fatal error 
flow, initialize all
+	 * cmd contexts to allow simulating completions
+	 * with complete() at any time.
+	 */
+	init_completion(&priv->cmd.context[i].done);
 	}
 	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
@@ -2329,6 +2431,25 @@ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
 	return slave - 1;
 }
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cmd_context *context;
+	int i;
+
+	spin_lock(&priv->cmd.context_lock);
+	if (priv->cmd.context) {
+		for (i = 0; i < priv->cmd.max_cmds; ++i) {
+			context = &priv->cmd.context[i];
+			context->fw_status = CMD_STAT_INTERNAL_ERR;
+			context->result =
+				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+			complete(&context->done);
+		}
+	}
+	spin_unlock(&priv->cmd.context_lock);
+}
+
 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_active_ports actv_ports;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index a3867e7ef885..d22d9283d2cd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1318,6 +1318,9 @@ out:
 	mutex_unlock(&priv->mcg_table.mutex);
 	mlx4_free_cmd_mailbox(dev, mailbox);
+	if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+		/* In case device is under an error, return success as a closing command */
+		err = 0;
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index aa1ecbc5a606..5c772ea4473b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -235,6 +235,7 @@ do { \
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
+extern int mlx4_internal_err_reset;
 #define MLX4_MAX_NUM_SLAVES	(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
 #define ALL_SLAVES 0xff
@@ -607,7 +608,6 @@ struct mlx4_mgm {
 struct mlx4_cmd {
 	struct pci_pool *pool;
 	void __iomem *hcr;
-	struct mutex hcr_mutex;
 	struct mutex slave_cmd_mutex;
 	struct semaphore poll_sem;
 	struct semaphore event_sem;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 64d25941b329..e7543844cc7a 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -279,6 +279,7 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
			      struct mlx4_config_dev_params *params);
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
 /*
  * mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)
-- cgit v1.2.3

From c69453e294c9f16da977b68e658a8028b854c209 Mon Sep 17 00:00:00 2001
From: Yishai Hadas
Date: Sun, 25 Jan 2015 16:59:40 +0200
Subject: net/mlx4_core: Manage interface state for Reset flow cases

We need to manage interface state to synchronize between the reset flow
and other related flows, such as remove_one. This has to be done to
prevent certain races. For example, if the software stack is already down
as a result of an unload call, remove_one should skip the unload phase.
This patch implements the remove_one case; handling AER and other cases
comes next.

The interface can be up or down. Upon remove_one, the state will include
an extra bit indicating that the device is being cleaned up, forcing other
tasks to finish before the final cleanup.

Signed-off-by: Yishai Hadas
Signed-off-by: Or Gerlitz
Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/catas.c | 13 +++++++++++-- drivers/net/ethernet/mellanox/mlx4/intf.c | 2 ++ drivers/net/ethernet/mellanox/mlx4/main.c | 13 ++++++++++++- include/linux/mlx4/device.h | 7 +++++++ 4 files changed, 32 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 63f14ffcc906..3fcf3cfaedfc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -122,8 +122,14 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) int err = 0; mlx4_enter_error_state(persist); - err = mlx4_restart_one(persist->pdev); - mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", err); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP && + !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { + err = mlx4_restart_one(persist->pdev); + mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", + err); + } + mutex_unlock(&persist->interface_state_mutex); } static void dump_err_buf(struct mlx4_dev *dev) @@ -211,6 +217,9 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev) iounmap(priv->catas_err.map); priv->catas_err.map = NULL; } + + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION) + flush_workqueue(dev->persist->catas_wq); } int mlx4_catas_init(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 116895ac8b35..fba0b96a6f28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -138,6 +138,7 @@ int mlx4_register_device(struct mlx4_dev *dev) mutex_lock(&intf_mutex); + dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP; list_add_tail(&priv->dev_list, &dev_list); list_for_each_entry(intf, &intf_list, list) mlx4_add_device(intf, priv); @@ -162,6 +163,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev) mlx4_remove_device(intf, priv); list_del(&priv->dev_list); + dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP; mutex_unlock(&intf_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index dc2d910fcc88..d59cae5da3f0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3114,6 +3114,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, dev->persist); priv->pci_dev_data = id->driver_data; mutex_init(&dev->persist->device_state_mutex); + mutex_init(&dev->persist->interface_state_mutex); ret = __mlx4_init_one(pdev, id->driver_data, priv); if (ret) { @@ -3232,7 +3233,17 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); - mlx4_unload_one(pdev); + mutex_lock(&persist->interface_state_mutex); + persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; + mutex_unlock(&persist->interface_state_mutex); + + /* device marked to be under deletion running now without the lock + * letting other tasks to be terminated + */ + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + else + mlx4_info(dev, "%s: interface is down\n", __func__); mlx4_catas_end(dev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7d5d317cb7a6..33f9ca71925c 100644 --- 
a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -416,6 +416,11 @@ enum {
 	MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
 };
+enum {
+	MLX4_INTERFACE_STATE_UP		= 1 << 0,
+	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
+};
+
 #define MSTR_SM_CHANGE_MASK	(MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
				 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
@@ -760,6 +765,8 @@ struct mlx4_dev_persistent {
 	struct workqueue_struct *catas_wq;
 	struct mutex	device_state_mutex; /* protect HW state */
 	u8		state;
+	struct mutex	interface_state_mutex; /* protect SW state */
+	u8		interface_state;
 };
 struct mlx4_dev {
-- cgit v1.2.3

From 55ad359225b2232b9b8f04a0dfa169bd3a7d86d2 Mon Sep 17 00:00:00 2001
From: Yishai Hadas
Date: Sun, 25 Jan 2015 16:59:42 +0200
Subject: net/mlx4_core: Enable device recovery flow with SRIOV

In SRIOV, both the PF and the VF may attempt device recovery whenever they
assume that the device is not functioning. When the PF driver resets the
device, the VF should detect this and attempt to reinitialize itself.

The VF must be able to reset itself under all circumstances, even if the
PF is not responsive.

The VF shall reset itself in the following cases:
1. Commands are not processed within a reasonable time over the
   communication channel. This is done while considering the device state
   and the correct return code based on the command, as was done in native
   mode; that part is handled in the next patch.
2. The VF driver receives an internal error event reported by the PF on
   the communication channel. This occurs when the PF driver resets the
   device or when the VF is out of sync with the PF.

Add a 'VF reset' capability, which allows the VF to reinitialize itself
even when the PF is not responsive.

As the PF and VF may run their reset flows simultaneously, several cases
are handled:
- Prevent freeing VF resources upon FLR, when the PF is in its unloading
  stage.
- Prevent the PF from getting VF commands before it has finished
  initializing its resources.
- Upon VF startup, check that the comm channel is online before sending
  commands to the PF, to avoid timing out.

Signed-off-by: Yishai Hadas
Signed-off-by: Or Gerlitz
Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/catas.c | 120 +++++++++++++++++++++---- drivers/net/ethernet/mellanox/mlx4/cmd.c | 77 ++++++++++++---- drivers/net/ethernet/mellanox/mlx4/eq.c | 10 ++- drivers/net/ethernet/mellanox/mlx4/intf.c | 6 +- drivers/net/ethernet/mellanox/mlx4/main.c | 135 +++++++++++++++++++++++------ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 7 +- include/linux/mlx4/cmd.h | 2 + include/linux/mlx4/device.h | 5 ++ 8 files changed, 292 insertions(+), 70 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 3fcf3cfaedfc..715de8affcc9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -45,8 +45,7 @@ enum { int mlx4_internal_err_reset = 1; module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644); MODULE_PARM_DESC(internal_err_reset, - "Reset device on internal errors if non-zero" - " (default 1, in SRIOV mode default is 0)"); + "Reset device on internal errors if non-zero (default 1)"); static int read_vendor_id(struct mlx4_dev *dev) { @@ -71,6 +70,9 @@ static int mlx4_reset_master(struct mlx4_dev *dev) { int err = 0; + if (mlx4_is_master(dev)) + mlx4_report_internal_err_comm_event(dev); + if (!pci_channel_offline(dev->persist->pdev)) { err = read_vendor_id(dev); /* If PCI can't be accessed to read vendor ID we assume that its @@ -87,6 +89,81 @@ static int mlx4_reset_master(struct mlx4_dev *dev) return err; } +static int mlx4_reset_slave(struct mlx4_dev *dev) +{ +#define COM_CHAN_RST_REQ_OFFSET 0x10 +#define COM_CHAN_RST_ACK_OFFSET 0x08 + + u32 comm_flags; + u32 rst_req; + u32 rst_ack; + unsigned long end; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (pci_channel_offline(dev->persist->pdev)) + return 0; + + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + if (comm_flags == 0xffffffff) { + mlx4_err(dev, "VF reset is not needed\n"); + return 0; + } + + if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) { + mlx4_err(dev, "VF reset is not supported\n"); + return -EOPNOTSUPP; + } + + rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >> + COM_CHAN_RST_REQ_OFFSET; + rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >> + COM_CHAN_RST_ACK_OFFSET; + if (rst_req != rst_ack) { + mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n"); + return -EIO; + } + + rst_req ^= 1; + mlx4_warn(dev, "VF is sending reset request to Firmware\n"); + comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET; + __raw_writel((__force u32)cpu_to_be32(comm_flags), + (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS); + /* Make sure that our comm channel write doesn't + * get mixed in with writes from another CPU. + */ + mmiowb(); + + end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies; + while (time_before(jiffies, end)) { + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >> + COM_CHAN_RST_ACK_OFFSET; + + /* Reading rst_req again since the communication channel can + * be reset at any time by the PF and all its bits will be + * set to zero. 
+ */ + rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >> + COM_CHAN_RST_REQ_OFFSET; + + if (rst_ack == rst_req) { + mlx4_warn(dev, "VF Reset succeed\n"); + return 0; + } + cond_resched(); + } + mlx4_err(dev, "Fail to send reset over the communication channel\n"); + return -ETIMEDOUT; +} + +static int mlx4_comm_internal_err(u32 slave_read) +{ + return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == + (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; +} + void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) { int err; @@ -101,7 +178,10 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) dev = persist->dev; mlx4_err(dev, "device is going to be reset\n"); - err = mlx4_reset_master(dev); + if (mlx4_is_slave(dev)) + err = mlx4_reset_slave(dev); + else + err = mlx4_reset_master(dev); BUG_ON(err != 0); dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; @@ -148,8 +228,15 @@ static void poll_catas(unsigned long dev_ptr) { struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; struct mlx4_priv *priv = mlx4_priv(dev); - - if (readl(priv->catas_err.map)) { + u32 slave_read; + + if (mlx4_is_slave(dev)) { + slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); + if (mlx4_comm_internal_err(slave_read)) { + mlx4_warn(dev, "Internal error detected on the communication channel\n"); + goto internal_err; + } + } else if (readl(priv->catas_err.map)) { dump_err_buf(dev); goto internal_err; } @@ -182,22 +269,21 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); phys_addr_t addr; - /*If we are in SRIOV the default of the module param must be 0*/ - if (mlx4_is_mfunc(dev)) - mlx4_internal_err_reset = 0; - INIT_LIST_HEAD(&priv->catas_err.list); init_timer(&priv->catas_err.timer); priv->catas_err.map = NULL; - addr = pci_resource_start(dev->persist->pdev, priv->fw.catas_bar) + - priv->fw.catas_offset; - - priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); - if (!priv->catas_err.map) { - mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", - (unsigned long long) addr); - return; + if (!mlx4_is_slave(dev)) { + addr = pci_resource_start(dev->persist->pdev, + priv->fw.catas_bar) + + priv->fw.catas_offset; + + priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); + if (!priv->catas_err.map) { + mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", + (unsigned long long)addr); + return; + } } priv->catas_err.timer.data = (unsigned long) dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 3895b2b5fc92..7652eed4bbc8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -42,6 +42,7 @@ #include #include #include +#include #include @@ -729,7 +730,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, EXPORT_SYMBOL_GPL(__mlx4_cmd); -static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) +int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); @@ -1945,8 +1946,11 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, break; case MLX4_COMM_CMD_VHCR_POST: if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && - (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) + (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) { + mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n", + slave, cmd, slave_state[slave].last_cmd); 
goto reset_slave; + } mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_master_process_vhcr(dev, slave, NULL)) { @@ -1980,7 +1984,18 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, reset_slave: /* cleanup any slave resources */ - mlx4_delete_all_resources_for_slave(dev, slave); + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_delete_all_resources_for_slave(dev, slave); + + if (cmd != MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n", + slave, cmd); + /* Turn on internal error letting slave reset itself immeditaly, + * otherwise it might take till timeout on command is passed + */ + reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR); + } + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); if (!slave_state[slave].is_slave_going_down) slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; @@ -2056,17 +2071,28 @@ void mlx4_master_comm_channel(struct work_struct *work) static int sync_toggles(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - int wr_toggle; - int rd_toggle; + u32 wr_toggle; + u32 rd_toggle; unsigned long end; - wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31; - end = jiffies + msecs_to_jiffies(5000); + wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)); + if (wr_toggle == 0xffffffff) + end = jiffies + msecs_to_jiffies(30000); + else + end = jiffies + msecs_to_jiffies(5000); while (time_before(jiffies, end)) { - rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31; - if (rd_toggle == wr_toggle) { - priv->cmd.comm_toggle = rd_toggle; + rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); + if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { + /* PCI might be offline */ + msleep(100); + wr_toggle = swab32(readl(&priv->mfunc.comm-> + slave_write)); + continue; + } + + if (rd_toggle >> 31 == wr_toggle >> 31) { + priv->cmd.comm_toggle = rd_toggle >> 31; return 0; } @@ -2172,13 +2198,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) if (mlx4_init_resource_tracker(dev)) goto err_thread; - err = mlx4_ARM_COMM_CHANNEL(dev); - if (err) { - mlx4_err(dev, " Failed to arm comm channel eq: %x\n", - err); - goto err_resource; - } - } else { err = sync_toggles(dev); if (err) { @@ -2188,8 +2207,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) } return 0; -err_resource: - mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL); err_thread: flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); @@ -2266,6 +2283,27 @@ err: return -ENOMEM; } +void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int slave; + u32 slave_read; + + /* Report an internal error event to all + * communication channels. + */ + for (slave = 0; slave < dev->num_slaves; slave++) { + slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read)); + slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR; + __raw_writel((__force u32)cpu_to_be32(slave_read), + &priv->mfunc.comm[slave].slave_read); + /* Make sure that our comm channel write doesn't + * get mixed in with writes from another CPU. 
+ */ + mmiowb(); + } +} + void mlx4_multi_func_cleanup(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2281,6 +2319,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) kfree(priv->mfunc.master.slave_state); kfree(priv->mfunc.master.vf_admin); kfree(priv->mfunc.master.vf_oper); + dev->num_slaves = 0; } iounmap(priv->mfunc.comm); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 7538c9ce98a9..2f2e6067426d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -429,8 +429,14 @@ void mlx4_master_handle_slave_flr(struct work_struct *work) if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", i); - - mlx4_delete_all_resources_for_slave(dev, i); + /* In case of 'Reset flow' FLR can be generated for + * a slave before mlx4_load_one is done. + * make sure interface is up before trying to delete + * slave resources which weren't allocated yet. + */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_UP) + mlx4_delete_all_resources_for_slave(dev, i); /*return the slave to running mode*/ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index fba0b96a6f28..68d2bad325d5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -144,8 +144,7 @@ int mlx4_register_device(struct mlx4_dev *dev) mlx4_add_device(intf, priv); mutex_unlock(&intf_mutex); - if (!mlx4_is_slave(dev)) - mlx4_start_catas_poll(dev); + mlx4_start_catas_poll(dev); return 0; } @@ -155,8 +154,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; - if (!mlx4_is_slave(dev)) - mlx4_stop_catas_poll(dev); + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6bb0fca137cd..1baf1f1e2866 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -108,6 +108,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe, MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ MLX4_FUNC_CAP_DMFS_A0_STATIC) +#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) + static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; @@ -1579,6 +1581,50 @@ static void mlx4_close_fw(struct mlx4_dev *dev) } } +static int mlx4_comm_check_offline(struct mlx4_dev *dev) +{ +#define COMM_CHAN_OFFLINE_OFFSET 0x09 + + u32 comm_flags; + u32 offline_bit; + unsigned long end; + struct mlx4_priv *priv = mlx4_priv(dev); + + end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; + while (time_before(jiffies, end)) { + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + offline_bit = (comm_flags & + (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); + if (!offline_bit) + return 0; + /* There are cases as part of AER/Reset flow that PF needs + * around 100 msec to load. We therefore sleep for 100 msec + * to allow other tasks to make use of that CPU during this + * time interval. 
+ */ + msleep(100); + } + mlx4_err(dev, "Communication channel is offline.\n"); + return -EIO; +} + +static void mlx4_reset_vf_support(struct mlx4_dev *dev) +{ +#define COMM_CHAN_RST_OFFSET 0x1e + + struct mlx4_priv *priv = mlx4_priv(dev); + u32 comm_rst; + u32 comm_caps; + + comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_CAPS)); + comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); + + if (comm_rst) + dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; +} + static int mlx4_init_slave(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1594,6 +1640,12 @@ static int mlx4_init_slave(struct mlx4_dev *dev) mutex_lock(&priv->cmd.slave_cmd_mutex); priv->cmd.max_cmds = 1; + if (mlx4_comm_check_offline(dev)) { + mlx4_err(dev, "PF is not responsive, skipping initialization\n"); + goto err_offline; + } + + mlx4_reset_vf_support(dev); mlx4_warn(dev, "Sending reset\n"); ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME); @@ -1637,6 +1689,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev) err: mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); +err_offline: mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EIO; } @@ -2494,11 +2547,19 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) !!((flags) & MLX4_FLAG_MASTER)) static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, - u8 total_vfs, int existing_vfs) + u8 total_vfs, int existing_vfs, int reset_flow) { u64 dev_flags = dev->flags; int err = 0; + if (reset_flow) { + dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (!dev->dev_vfs) + goto free_mem; + return dev_flags; + } + atomic_inc(&pf_loading); if (dev->flags & MLX4_FLAG_SRIOV) { if (existing_vfs != total_vfs) { @@ -2533,6 +2594,7 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, disable_sriov: atomic_dec(&pf_loading); +free_mem: dev->persist->num_vfs = 0; kfree(dev->dev_vfs); return dev_flags & ~MLX4_FLAG_MASTER; @@ -2557,7 +2619,8 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap } static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, - int total_vfs, int *nvfs, struct mlx4_priv *priv) + int total_vfs, int *nvfs, struct mlx4_priv *priv, + int reset_flow) { struct mlx4_dev *dev; unsigned sum = 0; @@ -2679,8 +2742,10 @@ slave_start: goto err_fw; if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { - u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, - existing_vfs); + u64 dev_flags = mlx4_enable_sriov(dev, pdev, + total_vfs, + existing_vfs, + reset_flow); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); dev->flags = dev_flags; @@ -2722,7 +2787,7 @@ slave_start: if (dev->flags & MLX4_FLAG_SRIOV) { if (!existing_vfs) pci_disable_sriov(pdev); - if (mlx4_is_master(dev)) + if (mlx4_is_master(dev) && !reset_flow) atomic_dec(&pf_loading); dev->flags &= ~MLX4_FLAG_SRIOV; } @@ -2736,7 +2801,8 @@ slave_start: } if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { - u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs); + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, + existing_vfs, reset_flow); if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); @@ -2848,6 +2914,17 @@ slave_start: goto err_steer; mlx4_init_quotas(dev); + /* When PF resources are ready arm its comm channel to enable + * getting commands + */ + if (mlx4_is_master(dev)) { + err = mlx4_ARM_COMM_CHANNEL(dev); + if (err) { + mlx4_err(dev, " 
Failed to arm comm channel eq: %x\n", + err); + goto err_steer; + } + } for (port = 1; port <= dev->caps.num_ports; port++) { err = mlx4_init_port_info(dev, port); @@ -2866,7 +2943,7 @@ slave_start: priv->removed = 0; - if (mlx4_is_master(dev) && dev->persist->num_vfs) + if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) atomic_dec(&pf_loading); kfree(dev_cap); @@ -2925,10 +3002,12 @@ err_cmd: mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); err_sriov: - if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) + if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { pci_disable_sriov(pdev); + dev->flags &= ~MLX4_FLAG_SRIOV; + } - if (mlx4_is_master(dev) && dev->persist->num_vfs) + if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) atomic_dec(&pf_loading); kfree(priv->dev.dev_vfs); @@ -3073,7 +3152,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, if (err) goto err_release_regions; - err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); if (err) goto err_catas; @@ -3131,9 +3210,11 @@ static void mlx4_clean_dev(struct mlx4_dev *dev) { struct mlx4_dev_persistent *persist = dev->persist; struct mlx4_priv *priv = mlx4_priv(dev); + unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); memset(priv, 0, sizeof(*priv)); priv->dev.persist = persist; + priv->dev.flags = flags; } static void mlx4_unload_one(struct pci_dev *pdev) @@ -3143,7 +3224,6 @@ static void mlx4_unload_one(struct pci_dev *pdev) struct mlx4_priv *priv = mlx4_priv(dev); int pci_dev_data; int p, i; - int active_vfs = 0; if (priv->removed) return; @@ -3157,14 +3237,6 @@ static void mlx4_unload_one(struct pci_dev *pdev) pci_dev_data = priv->pci_dev_data; - /* Disabling SR-IOV is not allowed while there are active vf's */ - if (mlx4_is_master(dev)) { - active_vfs = mlx4_how_many_lives_vf(dev); - if (active_vfs) { - pr_warn("Removing PF when there are active VF's !!\n"); - pr_warn("Will not disable SR-IOV.\n"); - } - } mlx4_stop_sense(dev); mlx4_unregister_device(dev); @@ -3208,12 +3280,6 @@ static void mlx4_unload_one(struct pci_dev *pdev) if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); - if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { - mlx4_warn(dev, "Disabling SR-IOV\n"); - pci_disable_sriov(pdev); - dev->flags &= ~MLX4_FLAG_SRIOV; - dev->persist->num_vfs = 0; - } if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); @@ -3235,11 +3301,21 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); + int active_vfs = 0; mutex_lock(&persist->interface_state_mutex); persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; mutex_unlock(&persist->interface_state_mutex); + /* Disabling SR-IOV is not allowed while there are active vf's */ + if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { + active_vfs = mlx4_how_many_lives_vf(dev); + if (active_vfs) { + pr_warn("Removing PF when there are active VF's !!\n"); + pr_warn("Will not disable SR-IOV.\n"); + } + } + /* device marked to be under deletion running now without the lock * letting other tasks to be terminated */ @@ -3248,6 +3324,11 @@ static void mlx4_remove_one(struct pci_dev *pdev) else mlx4_info(dev, "%s: interface is down\n", __func__); mlx4_catas_end(dev); + if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { + mlx4_warn(dev, "Disabling SR-IOV\n"); + pci_disable_sriov(pdev); + } + pci_release_regions(pdev); 
pci_disable_device(pdev); kfree(dev->persist); @@ -3287,7 +3368,7 @@ int mlx4_restart_one(struct pci_dev *pdev) memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mlx4_unload_one(pdev); - err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); if (err) { mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", __func__, pci_name(pdev), err); @@ -3397,7 +3478,7 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) mutex_lock(&persist->interface_state_mutex); if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, - priv); + priv, 1); if (ret) { mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", __func__, ret); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 5c772ea4473b..2a15b8248e77 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -85,7 +85,9 @@ enum { MLX4_CLR_INT_SIZE = 0x00008, MLX4_SLAVE_COMM_BASE = 0x0, MLX4_COMM_PAGESIZE = 0x1000, - MLX4_CLOCK_SIZE = 0x00008 + MLX4_CLOCK_SIZE = 0x00008, + MLX4_COMM_CHAN_CAPS = 0x8, + MLX4_COMM_CHAN_FLAGS = 0xc }; enum { @@ -120,6 +122,8 @@ enum mlx4_mpt_state { }; #define MLX4_COMM_TIME 10000 +#define MLX4_COMM_OFFLINE_TIME_OUT 30000 + enum { MLX4_COMM_CMD_RESET, MLX4_COMM_CMD_VHCR0, @@ -1162,6 +1166,7 @@ enum { int mlx4_cmd_init(struct mlx4_dev *dev); void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); int mlx4_multi_func_init(struct mlx4_dev *dev); +int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev); void mlx4_multi_func_cleanup(struct mlx4_dev *dev); void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); int mlx4_cmd_use_events(struct mlx4_dev *dev); diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index e7543844cc7a..c989442ffc6a 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -280,6 +280,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat int mlx4_config_dev_retrieval(struct mlx4_dev *dev, struct mlx4_config_dev_params *params); void mlx4_cmd_wake_completions(struct mlx4_dev *dev); +void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev); /* * mlx4_get_slave_default_vlan - * return true if VST ( default vlan) @@ -289,5 +290,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) +#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17) #endif /* MLX4_CMD_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 33f9ca71925c..5ef54e145e4d 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -208,6 +208,10 @@ enum { MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 }; +enum { + MLX4_VF_CAP_FLAG_RESET = 1 << 0 +}; + /* bit enums for an 8-bit flags field indicating special use * QPs which require special handling in qp_reserve_range. 
* Currently, this only includes QPs used by the ETH interface, @@ -545,6 +549,7 @@ struct mlx4_caps { u8 alloc_res_qp_mask; u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; + u32 vf_caps; }; struct mlx4_buf_list { -- cgit v1.2.3 From c2943f14534bdc4230f4da6dcd4ea03c5d8c8162 Mon Sep 17 00:00:00 2001 From: Harout Hedeshian Date: Tue, 20 Jan 2015 10:06:05 -0700 Subject: net: ipv6: Add sysctl entry to disable MTU updates from RA The kernel forcefully applies MTU values received in router advertisements provided the new MTU is less than the current. This behavior is undesirable when the user space is managing the MTU. Instead a sysctl flag 'accept_ra_mtu' is introduced such that the user space can control whether or not RA provided MTU updates should be applied. The default behavior is unchanged; user space must explicitly set this flag to 0 for RA MTUs to be ignored. Signed-off-by: Harout Hedeshian Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 7 +++++++ include/linux/ipv6.h | 1 + include/uapi/linux/ipv6.h | 1 + net/ipv6/addrconf.c | 10 ++++++++++ net/ipv6/ndisc.c | 2 +- 5 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 85b022179104..a5e4c813f17f 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1287,6 +1287,13 @@ accept_ra_rtr_pref - BOOLEAN Functional default: enabled if accept_ra is enabled. disabled if accept_ra is disabled. +accept_ra_mtu - BOOLEAN + Apply the MTU value specified in RA option 5 (RFC4861). If + disabled, the MTU specified in the RA will be ignored. + + Functional default: enabled if accept_ra is enabled. + disabled if accept_ra is disabled. + accept_redirects - BOOLEAN Accept Redirects. 
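The new knob surfaces as /proc/sys/net/ipv6/conf/<ifname>/accept_ra_mtu (alongside the usual all/ and default/ entries). A minimal userspace sketch of turning it off for one interface; "eth0" is an assumed example name, not something the patch mandates:

	/* Sketch: disable RA-provided MTU updates on a single interface. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/accept_ra_mtu", "w");

		if (!f)
			return 1;
		fputs("0\n", f);	/* 0: ignore the MTU carried in RA option 5 */
		return fclose(f) != 0;
	}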
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index c694e7baa621..2805062c013f 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -52,6 +52,7 @@ struct ipv6_devconf { __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; void *sysctl; }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 73cb02dc3065..437a6a4b125a 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -169,6 +169,7 @@ enum { DEVCONF_SUPPRESS_FRAG_NDISC, DEVCONF_ACCEPT_RA_FROM_LOCAL, DEVCONF_USE_OPTIMISTIC, + DEVCONF_ACCEPT_RA_MTU, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d6b4f5d08014..7dcc065e2160 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -201,6 +201,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .disable_ipv6 = 0, .accept_dad = 1, .suppress_frag_ndisc = 1, + .accept_ra_mtu = 1, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -238,6 +239,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .disable_ipv6 = 0, .accept_dad = 1, .suppress_frag_ndisc = 1, + .accept_ra_mtu = 1, }; /* Check if a valid qdisc is available */ @@ -4380,6 +4382,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify; array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc; array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local; + array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu; } static inline size_t inet6_ifla6_size(void) @@ -5258,6 +5261,13 @@ static struct addrconf_sysctl_table .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "accept_ra_mtu", + .data = &ipv6_devconf.accept_ra_mtu, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { /* sentinel */ } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 682866777d53..8a9d7c19e247 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1348,7 +1348,7 @@ skip_routeinfo: } } - if (ndopts.nd_opts_mtu) { + if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) { __be32 n; u32 mtu; -- cgit v1.2.3 From 607954b084d4ad5e6a2e0f795de7803d9c6ae37f Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 21 Jan 2015 11:12:13 +0000 Subject: rhashtable: fix rht_for_each_entry_safe() endless loop "next" is not updated, causing an endless loop for buckets with more than one element. Fixes: 88d6ed15acff ("rhashtable: Convert bucket iterators to take table and index") Signed-off-by: Patrick McHardy Acked-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index a2562ed53ea3..e0337844358e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -260,7 +260,9 @@ void rhashtable_destroy(struct rhashtable *ht); next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ - pos = next) + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** * rht_for_each_rcu_continue - continue iterating over rcu hash chain -- cgit v1.2.3 From 0b35fa7daefe9da6fdef91d95e07eebb714a8fcc Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sun, 25 Jan 2015 23:33:30 +0100 Subject: NFC: st21nfcb: Remove useless include include/linux/platform_data/st21nfcb.h is phy generic. 
There is no need to include linux/i2c.h Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- include/linux/platform_data/st21nfcb.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index c3b432f5b63e..05c54c958cc3 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -19,8 +19,6 @@ #ifndef _ST21NFCB_NCI_H_ #define _ST21NFCB_NCI_H_ -#include - #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" struct st21nfcb_nfc_platform_data { -- cgit v1.2.3 From 6da7c85c75eed769423b428eb654eaaf89d273c1 Mon Sep 17 00:00:00 2001 From: Christophe Ricard Date: Sun, 25 Jan 2015 23:33:31 +0100 Subject: NFC: st21nfcb: Fix copy/paste error in comment include/linux/platform_data/st21nfcb.h is based on include/linux/platform_data/st21nfca.h. The endif comment is inacurrate for st21nfcb. Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- include/linux/platform_data/st21nfcb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 05c54c958cc3..b023373d9874 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -26,4 +26,4 @@ struct st21nfcb_nfc_platform_data { unsigned int irq_polarity; }; -#endif /* _ST21NFCA_HCI_H_ */ +#endif /* _ST21NFCB_NCI_H_ */ -- cgit v1.2.3 From aae88261abd58fffef7ee0e00160ce4ea105b0f3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 26 Jan 2015 22:05:38 -0800 Subject: net: phy: document has_fixups field has_fixups was introduced to help keeping track of fixups/quirks running on a PHY device, but we did not update the comment above struct phy_device accordingly. Fixes: b0ae009f3dc14 (net: phy: add "has_fixups" boolean property") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phy.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 9c189a1fa3a2..1b3690b597d5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -327,6 +327,7 @@ struct phy_c45_device_ids { * c45_ids: 802.3-c45 Device Identifers if is_c45. * is_c45: Set to true if this phy uses clause 45 addressing. * is_internal: Set to true if this phy is internal to a MAC. + * has_fixups: Set to true if this phy has fixups/quirks. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * addr: Bus address of PHY -- cgit v1.2.3 From 8a477a6fb6a33651adda772360b85fd813569743 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 26 Jan 2015 22:05:39 -0800 Subject: net: phy: keep track of the PHY suspend state In order to avoid double calls to phydev->drv->suspend and resume, keep track of whether the PHY has already been suspended as a consequence of a successful call to phy_suspend(). We will use this in our MDIO bus suspend/resume hooks to avoid a double suspend call. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 22 ++++++++++++++++++---- include/linux/phy.h | 2 ++ 2 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 3fc91e89f5a5..bdfe51fc3a65 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -699,6 +699,7 @@ int phy_suspend(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + int ret = 0; /* If the device has WOL enabled, we cannot suspend the PHY */ phy_ethtool_get_wol(phydev, &wol); @@ -706,18 +707,31 @@ int phy_suspend(struct phy_device *phydev) return -EBUSY; if (phydrv->suspend) - return phydrv->suspend(phydev); - return 0; + ret = phydrv->suspend(phydev); + + if (ret) + return ret; + + phydev->suspended = true; + + return ret; } EXPORT_SYMBOL(phy_suspend); int phy_resume(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); + int ret = 0; if (phydrv->resume) - return phydrv->resume(phydev); - return 0; + ret = phydrv->resume(phydev); + + if (ret) + return ret; + + phydev->suspended = false; + + return ret; } EXPORT_SYMBOL(phy_resume); diff --git a/include/linux/phy.h b/include/linux/phy.h index 1b3690b597d5..685809835b5c 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -328,6 +328,7 @@ struct phy_c45_device_ids { * is_c45: Set to true if this phy uses clause 45 addressing. * is_internal: Set to true if this phy is internal to a MAC. * has_fixups: Set to true if this phy has fixups/quirks. + * suspended: Set to true if this phy has been suspended successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * addr: Bus address of PHY @@ -365,6 +366,7 @@ struct phy_device { bool is_c45; bool is_internal; bool has_fixups; + bool suspended; enum phy_state state; -- cgit v1.2.3 From cfcf1682c4ca8f601a4702255958e0b1c9aa12cc Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Sat, 24 Jan 2015 19:52:05 +0200 Subject: cfg80211: Add new GCMP, CCMP-256, BIP-GMAC, BIP-CMAC-256 ciphers This makes cfg80211 aware of the GCMP, GCMP-256, CCMP-256, BIP-GMAC-128, BIP-GMAC-256, and BIP-CMAC-256 cipher suites. These new cipher suites were defined in IEEE Std 802.11ac-2013. 
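For context on the numeric selector values this patch adds: the WLAN_CIPHER_SUITE_* constants follow the IEEE 802.11 convention of packing a 3-byte OUI and a 1-byte suite type into a u32, so 0x000FAC09 is OUI 00-0F-AC with suite type 9 (GCMP-256). The helpers below are only an illustrative sketch of that encoding; they are not part of the patch.

/* Illustrative only: decompose an 802.11 cipher suite selector into
 * its OUI and suite-type fields, e.g. 0x000FAC09 -> 0x000FAC and 9.
 */
static inline u32 wlan_cipher_suite_oui(u32 suite)
{
	return suite >> 8;	/* 0x000FAC for suites defined by IEEE 802.11 */
}

static inline u8 wlan_cipher_suite_type(u32 suite)
{
	return suite & 0xff;	/* 9 = GCMP-256, 0x0a = CCMP-256, ... */
}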
Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 18 ++++++++++++++++++ net/wireless/util.c | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4f4eea8a6288..dbf417bf25bf 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1994,9 +1994,15 @@ enum ieee80211_key_len { WLAN_KEY_LEN_WEP40 = 5, WLAN_KEY_LEN_WEP104 = 13, WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_CCMP_256 = 32, WLAN_KEY_LEN_TKIP = 32, WLAN_KEY_LEN_AES_CMAC = 16, WLAN_KEY_LEN_SMS4 = 32, + WLAN_KEY_LEN_GCMP = 16, + WLAN_KEY_LEN_GCMP_256 = 32, + WLAN_KEY_LEN_BIP_CMAC_256 = 32, + WLAN_KEY_LEN_BIP_GMAC_128 = 16, + WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; #define IEEE80211_WEP_IV_LEN 4 @@ -2004,9 +2010,16 @@ enum ieee80211_key_len { #define IEEE80211_CCMP_HDR_LEN 8 #define IEEE80211_CCMP_MIC_LEN 8 #define IEEE80211_CCMP_PN_LEN 6 +#define IEEE80211_CCMP_256_HDR_LEN 8 +#define IEEE80211_CCMP_256_MIC_LEN 16 +#define IEEE80211_CCMP_256_PN_LEN 6 #define IEEE80211_TKIP_IV_LEN 8 #define IEEE80211_TKIP_ICV_LEN 4 #define IEEE80211_CMAC_PN_LEN 6 +#define IEEE80211_GMAC_PN_LEN 6 +#define IEEE80211_GCMP_HDR_LEN 8 +#define IEEE80211_GCMP_MIC_LEN 16 +#define IEEE80211_GCMP_PN_LEN 6 /* Public action codes */ enum ieee80211_pub_actioncode { @@ -2230,6 +2243,11 @@ enum ieee80211_sa_query_action { #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 #define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 #define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 +#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09 +#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A +#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B +#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C +#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D #define WLAN_CIPHER_SUITE_SMS4 0x00147201 diff --git a/net/wireless/util.c b/net/wireless/util.c index 08f136ad2ea5..919fee807dd9 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -230,6 +230,9 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, switch (params->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: /* Disallow pairwise keys with non-zero index unless it's WEP * or a vendor specific cipher (because current deployments use * pairwise WEP keys with non-zero indices and for vendor @@ -240,6 +243,9 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, return -EINVAL; break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: /* Disallow BIP (group-only) cipher as pairwise cipher */ if (pairwise) return -EINVAL; @@ -261,6 +267,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, if (params->key_len != WLAN_KEY_LEN_CCMP) return -EINVAL; break; + case WLAN_CIPHER_SUITE_CCMP_256: + if (params->key_len != WLAN_KEY_LEN_CCMP_256) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_GCMP: + if (params->key_len != WLAN_KEY_LEN_GCMP) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + if (params->key_len != WLAN_KEY_LEN_GCMP_256) + return -EINVAL; + break; case WLAN_CIPHER_SUITE_WEP104: if (params->key_len != WLAN_KEY_LEN_WEP104) return -EINVAL; @@ -269,6 +287,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, if (params->key_len != WLAN_KEY_LEN_AES_CMAC) return 
-EINVAL; break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + if (params->key_len != WLAN_KEY_LEN_BIP_CMAC_256) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_128) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_256) + return -EINVAL; + break; default: /* * We don't know anything about this algorithm, @@ -288,7 +318,13 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, return -EINVAL; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (params->seq_len != 6) return -EINVAL; break; -- cgit v1.2.3 From 56c52da2d554f081e8fce58ecbcf6a40c605b95b Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Sat, 24 Jan 2015 19:52:08 +0200 Subject: mac80211: Add BIP-CMAC-256 cipher This allows mac80211 to configure BIP-CMAC-256 to the driver and also use a software implementation within mac80211 when the driver does not support this with hardware acceleration. Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 9 +++++ net/mac80211/aes_cmac.c | 34 +++++++++++++---- net/mac80211/aes_cmac.h | 5 ++- net/mac80211/cfg.c | 2 + net/mac80211/debugfs_key.c | 4 ++ net/mac80211/key.c | 14 ++++++- net/mac80211/main.c | 13 ++++--- net/mac80211/rx.c | 21 ++++++++--- net/mac80211/tx.c | 3 ++ net/mac80211/wpa.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++ net/mac80211/wpa.h | 4 ++ 11 files changed, 180 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index dbf417bf25bf..b9c7897dc566 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1017,6 +1017,15 @@ struct ieee80211_mmie { u8 mic[8]; } __packed; +/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */ +struct ieee80211_mmie_16 { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[16]; +} __packed; + struct ieee80211_vendor_ie { u8 element_id; u8 len; diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c index 9b9009f99551..4192806be3d3 100644 --- a/net/mac80211/aes_cmac.c +++ b/net/mac80211/aes_cmac.c @@ -18,8 +18,8 @@ #include "key.h" #include "aes_cmac.h" -#define AES_CMAC_KEY_LEN 16 #define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */ +#define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */ #define AAD_LEN 20 @@ -35,9 +35,9 @@ static void gf_mulx(u8 *pad) pad[AES_BLOCK_SIZE - 1] ^= 0x87; } - -static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, - const u8 *addr[], const size_t *len, u8 *mac) +static void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, + const u8 *addr[], const size_t *len, u8 *mac, + size_t mac_len) { u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE]; const u8 *pos, *end; @@ -88,7 +88,7 @@ static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, for (i = 0; i < AES_BLOCK_SIZE; i++) pad[i] ^= cbc[i]; crypto_cipher_encrypt_one(tfm, pad, pad); - memcpy(mac, pad, CMAC_TLEN); + memcpy(mac, pad, mac_len); } @@ -107,17 +107,35 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, addr[2] = zero; len[2] = CMAC_TLEN;
aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN); } +void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic) +{ + const u8 *addr[3]; + size_t len[3]; + u8 zero[CMAC_TLEN_256]; + + memset(zero, 0, CMAC_TLEN_256); + addr[0] = aad; + len[0] = AAD_LEN; + addr[1] = data; + len[1] = data_len - CMAC_TLEN_256; + addr[2] = zero; + len[2] = CMAC_TLEN_256; + + aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN_256); +} -struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]) +struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[], + size_t key_len) { struct crypto_cipher *tfm; tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (!IS_ERR(tfm)) - crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN); + crypto_cipher_setkey(tfm, key, key_len); return tfm; } diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h index 0ce6487af795..3702041f44fd 100644 --- a/net/mac80211/aes_cmac.h +++ b/net/mac80211/aes_cmac.h @@ -11,9 +11,12 @@ #include -struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]); +struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[], + size_t key_len); void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic); +void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic); void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm); #endif /* AES_CMAC_H */ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index ef84441c119c..b7e528bbecce 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -164,6 +164,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; @@ -362,6 +363,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, params.seq_len = 6; break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: pn64 = atomic64_read(&key->u.aes_cmac.tx_pn); seq[0] = pn64; seq[1] = pn64 >> 8; diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 64de07b16092..d1b60eb014a8 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -101,6 +101,7 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: pn = atomic64_read(&key->u.aes_cmac.tx_pn); len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24), @@ -153,6 +154,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, len = p - buf; break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: rpn = key->u.aes_cmac.rx_pn; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", @@ -191,6 +193,7 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf, len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: len = scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_cmac.replays); break; @@ -214,6 +217,7 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf, switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: len = 
scnprintf(buf, sizeof(buf), "%u\n", key->u.aes_cmac.icverrors); break; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 83c61085c3f0..7ceea9d9fcd2 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -165,6 +165,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* all of these we can do in software - if driver can */ @@ -417,8 +418,12 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, } break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->conf.iv_len = 0; - key->conf.icv_len = sizeof(struct ieee80211_mmie); + if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) + key->conf.icv_len = sizeof(struct ieee80211_mmie); + else + key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) key->u.aes_cmac.rx_pn[j] = @@ -428,7 +433,7 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, * it does not need to be initialized for every packet. */ key->u.aes_cmac.tfm = - ieee80211_aes_cmac_key_setup(key_data); + ieee80211_aes_cmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_cmac.tfm)) { err = PTR_ERR(key->u.aes_cmac.tfm); kfree(key); @@ -481,6 +486,7 @@ static void ieee80211_key_free_common(struct ieee80211_key *key) ieee80211_aes_key_free(key->u.ccmp.tfm); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); break; case WLAN_CIPHER_SUITE_GCMP: @@ -804,6 +810,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, seq->ccmp.pn[0] = pn64 >> 40; break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: pn64 = atomic64_read(&key->u.aes_cmac.tx_pn); seq->ccmp.pn[5] = pn64; seq->ccmp.pn[4] = pn64 >> 8; @@ -854,6 +861,7 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; @@ -897,6 +905,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, atomic64_set(&key->u.ccmp.tx_pn, pn64); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: pn64 = (u64)seq->aes_cmac.pn[5] | ((u64)seq->aes_cmac.pn[4] << 8) | ((u64)seq->aes_cmac.pn[3] << 16) | @@ -948,6 +957,7 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a5ad2d5bb29b..053a17c5023a 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -671,7 +671,8 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) WLAN_CIPHER_SUITE_GCMP_256, /* keep last -- depends on hw flags! 
*/ - WLAN_CIPHER_SUITE_AES_CMAC + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_CMAC_256, }; if (local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL || @@ -710,7 +711,7 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); if (!have_mfp) - local->hw.wiphy->n_cipher_suites--; + local->hw.wiphy->n_cipher_suites -= 2; if (!have_wep) { local->hw.wiphy->cipher_suites += 2; @@ -736,9 +737,9 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) if (have_wep) n_suites += 2; - /* check if we have AES_CMAC */ + /* check if we have AES_CMAC, BIP-CMAC-256 */ if (have_mfp) - n_suites++; + n_suites += 2; suites = kmalloc(sizeof(u32) * n_suites, GFP_KERNEL); if (!suites) @@ -755,8 +756,10 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) suites[w++] = WLAN_CIPHER_SUITE_WEP104; } - if (have_mfp) + if (have_mfp) { suites[w++] = WLAN_CIPHER_SUITE_AES_CMAC; + suites[w++] = WLAN_CIPHER_SUITE_BIP_CMAC_256; + } for (r = 0; r < local->hw.n_cipher_schemes; r++) suites[w++] = cs[r].cipher; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e8c6ba5ce70b..93ebc9525478 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -647,6 +647,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) { struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; struct ieee80211_mmie *mmie; + struct ieee80211_mmie_16 *mmie16; if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) return -1; @@ -656,11 +657,18 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); - if (mmie->element_id != WLAN_EID_MMIE || - mmie->length != sizeof(*mmie) - 2) - return -1; - - return le16_to_cpu(mmie->key_id); + if (mmie->element_id == WLAN_EID_MMIE && + mmie->length == sizeof(*mmie) - 2) + return le16_to_cpu(mmie->key_id); + + mmie16 = (struct ieee80211_mmie_16 *) + (skb->data + skb->len - sizeof(*mmie16)); + if (skb->len >= 24 + sizeof(*mmie16) && + mmie16->element_id == WLAN_EID_MMIE && + mmie16->length == sizeof(*mmie16) - 2) + return le16_to_cpu(mmie16->key_id); + + return -1; } static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs, @@ -1660,6 +1668,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) case WLAN_CIPHER_SUITE_AES_CMAC: result = ieee80211_crypto_aes_cmac_decrypt(rx); break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + result = ieee80211_crypto_aes_cmac_256_decrypt(rx); + break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: result = ieee80211_crypto_gcmp_decrypt(rx); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index be57e0afd019..909c27be1fdc 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -639,6 +639,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) ieee80211_is_mgmt(hdr->frame_control); break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (!ieee80211_is_mgmt(hdr->frame_control)) tx->key = NULL; break; @@ -1021,6 +1022,8 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) tx, IEEE80211_CCMP_256_MIC_LEN); case WLAN_CIPHER_SUITE_AES_CMAC: return ieee80211_crypto_aes_cmac_encrypt(tx); + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + return ieee80211_crypto_aes_cmac_256_encrypt(tx); case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: return ieee80211_crypto_gcmp_encrypt(tx); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index ae654de9782a..549af118de9f 100644 --- a/net/mac80211/wpa.c 
+++ b/net/mac80211/wpa.c @@ -955,6 +955,48 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) return TX_CONTINUE; } +ieee80211_tx_result +ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct ieee80211_key *key = tx->key; + struct ieee80211_mmie_16 *mmie; + u8 aad[20]; + u64 pn64; + + if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) + return TX_DROP; + + skb = skb_peek(&tx->skbs); + + info = IEEE80211_SKB_CB(skb); + + if (info->control.hw_key) + return TX_CONTINUE; + + if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) + return TX_DROP; + + mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie)); + mmie->element_id = WLAN_EID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = cpu_to_le16(key->conf.keyidx); + + /* PN = PN + 1 */ + pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn); + + bip_ipn_set64(mmie->sequence_number, pn64); + + bip_aad(skb, aad); + + /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128) + */ + ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, + skb->data + 24, skb->len - 24, mmie->mic); + + return TX_CONTINUE; +} ieee80211_rx_result ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) @@ -1006,6 +1048,56 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) return RX_CONTINUE; } +ieee80211_rx_result +ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_key *key = rx->key; + struct ieee80211_mmie_16 *mmie; + u8 aad[20], mic[16], ipn[6]; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_CONTINUE; + + /* management frames are already linear */ + + if (skb->len < 24 + sizeof(*mmie)) + return RX_DROP_UNUSABLE; + + mmie = (struct ieee80211_mmie_16 *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id != WLAN_EID_MMIE || + mmie->length != sizeof(*mmie) - 2) + return RX_DROP_UNUSABLE; /* Invalid MMIE */ + + bip_ipn_swap(ipn, mmie->sequence_number); + + if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { + key->u.aes_cmac.replays++; + return RX_DROP_UNUSABLE; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ + bip_aad(skb, aad); + ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, + skb->data + 24, skb->len - 24, mic); + if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + key->u.aes_cmac.icverrors++; + return RX_DROP_UNUSABLE; + } + } + + memcpy(key->u.aes_cmac.rx_pn, ipn, 6); + + /* Remove MMIE */ + skb_trim(skb, skb->len - sizeof(*mmie)); + + return RX_CONTINUE; +} + ieee80211_tx_result ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx) { diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h index 43e109f27a89..06b7f167a176 100644 --- a/net/mac80211/wpa.h +++ b/net/mac80211/wpa.h @@ -32,8 +32,12 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx); +ieee80211_tx_result +ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx); +ieee80211_rx_result +ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx); ieee80211_tx_result ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx); ieee80211_rx_result -- cgit v1.2.3 From 2130fb97fecf9a51bb4a21da220cff3f72496a94 Mon 
Sep 17 00:00:00 2001 From: Christophe Ricard Date: Tue, 27 Jan 2015 01:18:19 +0100 Subject: NFC: st21nfca: Adding support for secure element st21nfca has 1 physical SWP line and can support up to 2 secure elements (UICC & eSE) thanks to an external switch managed with a gpio. The platform integrator needs to specify, through 2 initialization properties, uicc-present and ese-present, whether the platform is supposed to have a uicc and/or an ese. Of course, if the platform does not have an external switch, only one kind of secure element can be supported. Those parameters are the platform integrator's responsibility. During initialization, the white_list will be set according to those parameters. The discovery_se function will assume a secure element is physically present according to the uicc-present and ese-present values and will add it to the secure element list. On ese activation, the atr is retrieved to calculate a command exchange timeout based on the first atr(TB) value. The se_io function allows data to be transferred over SWP. 2 kinds of events may appear after data is sent: - ST21NFCA_EVT_TRANSMIT_DATA when receiving an apdu answer - ST21NFCA_EVT_WTX_REQUEST when the secure element needs more time than expected to compute a command. If this timeout expires, the first recovery attempt consists of sending a simple software reset proprietary command. If this attempt still fails, the second recovery attempt consists of sending a hardware reset proprietary command. This function is only relevant for eSE-like secure elements. This patch also changes the way a pipe is referenced. There can be different pipes connected to the same gate with different host destinations (ex: CONNECTIVITY). In order to keep host information, every pipe is referenced with a tuple (gate, host). In order to reduce changes, we keep the way a gate is addressed on the Terminal Host unchanged. However, this works only because we assume the apdu reader gate is present solely on the eSE slot; the connectivity gate cannot give a reliable value, as it will give the latest stored pipe value.
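To make the timeout derivation above concrete: the first TB byte of the ATR encodes BWI, which the patch converts to a wait time with ST21NFCA_BWI_TO_TIMEOUT(). The macro below is quoted from the patch; the worked example is illustrative.

/* From the patch: WT = 2^BWI / 10 [s], doubled as a safety margin and
 * converted to milliseconds.
 */
#define ST21NFCA_BWI_TO_TIMEOUT(x)	((1 << x) * 200)

/* Worked example with the default BWI of 4 (ST21NFCA_ATR_DEFAULT_BWI):
 *   WT             = 2^4 / 10 s      = 1.6 s
 *   with 2x margin = 3.2 s
 *   in msecs       = (1 << 4) * 200  = 3200 ms
 * so when st21nfca_se_get_bwi() falls back to the default, the eSE
 * command exchange timeout is 3.2 seconds.
 */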
Signed-off-by: Christophe Ricard Signed-off-by: Samuel Ortiz --- drivers/nfc/st21nfca/Makefile | 2 +- drivers/nfc/st21nfca/i2c.c | 17 +- drivers/nfc/st21nfca/st21nfca.c | 134 +++++++++-- drivers/nfc/st21nfca/st21nfca.h | 21 +- drivers/nfc/st21nfca/st21nfca_se.c | 390 +++++++++++++++++++++++++++++++++ drivers/nfc/st21nfca/st21nfca_se.h | 63 ++++++ include/linux/platform_data/st21nfca.h | 2 + 7 files changed, 608 insertions(+), 21 deletions(-) create mode 100644 drivers/nfc/st21nfca/st21nfca_se.c create mode 100644 drivers/nfc/st21nfca/st21nfca_se.h (limited to 'include/linux') diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile index 7d688f97aa27..97edab4bbdf8 100644 --- a/drivers/nfc/st21nfca/Makefile +++ b/drivers/nfc/st21nfca/Makefile @@ -2,7 +2,7 @@ # Makefile for ST21NFCA HCI based NFC driver # -st21nfca_hci-objs = st21nfca.o st21nfca_dep.o +st21nfca_hci-objs = st21nfca.o st21nfca_dep.o st21nfca_se.o obj-$(CONFIG_NFC_ST21NFCA) += st21nfca_hci.o st21nfca_i2c-objs = i2c.o diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 82b82dbd2997..a32143951616 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -74,6 +74,8 @@ struct st21nfca_i2c_phy { unsigned int gpio_ena; unsigned int irq_polarity; + struct st21nfca_se_status se_status; + struct sk_buff *pending_skb; int current_read_len; /* @@ -537,6 +539,11 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) phy->irq_polarity = irq_get_trigger_type(client->irq); + phy->se_status.is_ese_present = + of_property_read_bool(pp, "ese-present"); + phy->se_status.is_uicc_present = + of_property_read_bool(pp, "uicc-present"); + return 0; } #else @@ -571,6 +578,9 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) } } + phy->se_status.is_ese_present = pdata->is_ese_present; + phy->se_status.is_uicc_present = pdata->is_uicc_present; + return 0; } @@ -638,8 +648,11 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, } return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, - ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM, - ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev); + ST21NFCA_FRAME_HEADROOM, + ST21NFCA_FRAME_TAILROOM, + ST21NFCA_HCI_LLC_MAX_PAYLOAD, + &phy->hdev, + &phy->se_status); } static int st21nfca_hci_i2c_remove(struct i2c_client *client) diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c index 7cb7ce47d2af..24d3d240d5f4 100644 --- a/drivers/nfc/st21nfca/st21nfca.c +++ b/drivers/nfc/st21nfca/st21nfca.c @@ -23,6 +23,7 @@ #include "st21nfca.h" #include "st21nfca_dep.h" +#include "st21nfca_se.h" #define DRIVER_DESC "HCI NFC driver for ST21NFCA" @@ -62,7 +63,6 @@ #define ST21NFCA_RF_CARD_F_DATARATE 0x08 #define ST21NFCA_RF_CARD_F_DATARATE_212_424 0x01 -#define ST21NFCA_DEVICE_MGNT_GATE 0x01 #define ST21NFCA_DEVICE_MGNT_PIPE 0x02 #define ST21NFCA_DM_GETINFO 0x13 @@ -78,6 +78,11 @@ #define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/ +#define ST21NFCA_EVT_HOT_PLUG 0x03 +#define ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80) + +#define ST21NFCA_SE_TO_PIPES 2000 + static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES); static struct nfc_hci_gate st21nfca_gates[] = { @@ -92,6 +97,10 @@ static struct nfc_hci_gate st21nfca_gates[] = { {ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE}, {ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE}, {ST21NFCA_RF_CARD_F_GATE, NFC_HCI_INVALID_PIPE}, + + /* Secure element pipes are created by secure element host */ + 
{ST21NFCA_CONNECTIVITY_GATE, NFC_HCI_DO_NOT_CREATE_PIPE}, + {ST21NFCA_APDU_READER_GATE, NFC_HCI_DO_NOT_CREATE_PIPE}, }; struct st21nfca_pipe_info { @@ -136,7 +145,8 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) * Pipe can be closed and need to be open. */ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, - ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE); + ST21NFCA_DEVICE_MGNT_GATE, + ST21NFCA_DEVICE_MGNT_PIPE); if (r < 0) goto free_info; @@ -167,17 +177,28 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) * - destination gid (1byte) */ info = (struct st21nfca_pipe_info *) skb_pipe_info->data; + if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE && + info->src_host_id != ST21NFCA_ESE_HOST_ID) { + pr_err("Unexpected apdu_reader pipe on host %x\n", + info->src_host_id); + continue; + } + for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) && - (st21nfca_gates[j].gate != info->dst_gate_id); - j++) + (st21nfca_gates[j].gate != info->dst_gate_id) ; j++) ; if (j < ARRAY_SIZE(st21nfca_gates) && st21nfca_gates[j].gate == info->dst_gate_id && ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) { st21nfca_gates[j].pipe = pipe_info[2]; + hdev->gate2pipe[st21nfca_gates[j].gate] = - st21nfca_gates[j].pipe; + st21nfca_gates[j].pipe; + hdev->pipes[st21nfca_gates[j].pipe].gate = + st21nfca_gates[j].gate; + hdev->pipes[st21nfca_gates[j].pipe].dest_host = + info->src_host_id; } } @@ -187,7 +208,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) */ if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) { for (i = skb_pipe_list->len + 3; - i < ARRAY_SIZE(st21nfca_gates); i++) { + i < ARRAY_SIZE(st21nfca_gates) - 2; i++) { r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, st21nfca_gates[i].gate, @@ -244,16 +265,33 @@ out: static int st21nfca_hci_ready(struct nfc_hci_dev *hdev) { + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); struct sk_buff *skb; u8 param; + u8 white_list[2]; + int wl_size = 0; int r; - param = NFC_HCI_UICC_HOST_ID; - r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, - NFC_HCI_ADMIN_WHITELIST, ¶m, 1); - if (r < 0) - return r; + if (info->se_status->is_ese_present && + info->se_status->is_uicc_present) { + white_list[wl_size++] = NFC_HCI_UICC_HOST_ID; + white_list[wl_size++] = ST21NFCA_ESE_HOST_ID; + } else if (!info->se_status->is_ese_present && + info->se_status->is_uicc_present) { + white_list[wl_size++] = NFC_HCI_UICC_HOST_ID; + } else if (info->se_status->is_ese_present && + !info->se_status->is_uicc_present) { + white_list[wl_size++] = ST21NFCA_ESE_HOST_ID; + } + + if (wl_size) { + r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, + NFC_HCI_ADMIN_WHITELIST, + (u8 *) &white_list, wl_size); + if (r < 0) + return r; + } /* Set NFC_MODE in device management gate to enable */ r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE, @@ -821,19 +859,79 @@ static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev, } } +static void st21nfca_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + struct sk_buff *skb) +{ + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + u8 gate = hdev->pipes[pipe].gate; + + pr_debug("cmd: %x\n", cmd); + + switch (cmd) { + case NFC_HCI_ANY_OPEN_PIPE: + if (gate != ST21NFCA_APDU_READER_GATE && + hdev->pipes[pipe].dest_host != NFC_HCI_UICC_HOST_ID) + info->se_info.count_pipes++; + + if (info->se_info.count_pipes == info->se_info.expected_pipes) { + del_timer_sync(&info->se_info.se_active_timer); + info->se_info.se_active = false; + info->se_info.count_pipes = 0; + 
complete(&info->se_info.req_completion); + } + break; + } +} + +static int st21nfca_admin_event_received(struct nfc_hci_dev *hdev, u8 event, + struct sk_buff *skb) +{ + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + pr_debug("admin event: %x\n", event); + + switch (event) { + case ST21NFCA_EVT_HOT_PLUG: + if (info->se_info.se_active) { + if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) { + del_timer_sync(&info->se_info.se_active_timer); + info->se_info.se_active = false; + complete(&info->se_info.req_completion); + } else { + mod_timer(&info->se_info.se_active_timer, + jiffies + + msecs_to_jiffies(ST21NFCA_SE_TO_PIPES)); + } + } + break; + } + kfree_skb(skb); + return 0; +} + /* * Returns: * <= 0: driver handled the event, skb consumed * 1: driver does not handle the event, please do standard processing */ -static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, +static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) { + u8 gate = hdev->pipes[pipe].gate; + u8 host = hdev->pipes[pipe].dest_host; + pr_debug("hci event: %d gate: %x\n", event, gate); switch (gate) { + case NFC_HCI_ADMIN_GATE: + return st21nfca_admin_event_received(hdev, event, skb); case ST21NFCA_RF_CARD_F_GATE: return st21nfca_dep_event_received(hdev, event, skb); + case ST21NFCA_CONNECTIVITY_GATE: + return st21nfca_connectivity_event_received(hdev, host, + event, skb); + case ST21NFCA_APDU_READER_GATE: + return st21nfca_apdu_reader_event_received(hdev, event, skb); default: return 1; } @@ -855,11 +953,17 @@ static struct nfc_hci_ops st21nfca_hci_ops = { .tm_send = st21nfca_hci_tm_send, .check_presence = st21nfca_hci_check_presence, .event_received = st21nfca_hci_event_received, + .cmd_received = st21nfca_hci_cmd_received, + .discover_se = st21nfca_hci_discover_se, + .enable_se = st21nfca_hci_enable_se, + .disable_se = st21nfca_hci_disable_se, + .se_io = st21nfca_hci_se_io, }; int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, - int phy_payload, struct nfc_hci_dev **hdev) + int phy_payload, struct nfc_hci_dev **hdev, + struct st21nfca_se_status *se_status) { struct st21nfca_hci_info *info; int r = 0; @@ -919,6 +1023,8 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, goto err_alloc_hdev; } + info->se_status = se_status; + nfc_hci_set_clientdata(info->hdev, info); r = nfc_hci_register_device(info->hdev); @@ -927,6 +1033,7 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, *hdev = info->hdev; st21nfca_dep_init(info->hdev); + st21nfca_se_init(info->hdev); return 0; @@ -945,6 +1052,7 @@ void st21nfca_hci_remove(struct nfc_hci_dev *hdev) struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); st21nfca_dep_deinit(hdev); + st21nfca_se_deinit(hdev); nfc_hci_unregister_device(hdev); nfc_hci_free_device(hdev); kfree(info); diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h index 7c2a85292230..15a78d330a9f 100644 --- a/drivers/nfc/st21nfca/st21nfca.h +++ b/drivers/nfc/st21nfca/st21nfca.h @@ -20,6 +20,7 @@ #include #include "st21nfca_dep.h" +#include "st21nfca_se.h" #define HCI_MODE 0 @@ -51,9 +52,15 @@ #define ST21NFCA_NUM_DEVICES 256 +struct st21nfca_se_status { + bool is_ese_present; + bool is_uicc_present; +}; + int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, - int phy_payload, struct nfc_hci_dev **hdev); + int phy_payload, struct nfc_hci_dev 
**hdev, + struct st21nfca_se_status *se_status); void st21nfca_hci_remove(struct nfc_hci_dev *hdev); enum st21nfca_state { @@ -66,6 +73,7 @@ struct st21nfca_hci_info { void *phy_id; struct nfc_hci_dev *hdev; + struct st21nfca_se_status *se_status; enum st21nfca_state state; @@ -76,13 +84,16 @@ struct st21nfca_hci_info { void *async_cb_context; struct st21nfca_dep_info dep_info; + struct st21nfca_se_info se_info; }; /* Reader RF commands */ -#define ST21NFCA_WR_XCHG_DATA 0x10 - -#define ST21NFCA_RF_READER_F_GATE 0x14 +#define ST21NFCA_WR_XCHG_DATA 0x10 -#define ST21NFCA_RF_CARD_F_GATE 0x24 +#define ST21NFCA_DEVICE_MGNT_GATE 0x01 +#define ST21NFCA_RF_READER_F_GATE 0x14 +#define ST21NFCA_RF_CARD_F_GATE 0x24 +#define ST21NFCA_APDU_READER_GATE 0xf0 +#define ST21NFCA_CONNECTIVITY_GATE 0x41 #endif /* __LOCAL_ST21NFCA_H_ */ diff --git a/drivers/nfc/st21nfca/st21nfca_se.c b/drivers/nfc/st21nfca/st21nfca_se.c new file mode 100644 index 000000000000..9b93d3904ab5 --- /dev/null +++ b/drivers/nfc/st21nfca/st21nfca_se.c @@ -0,0 +1,390 @@ +/* + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include + +#include "st21nfca.h" +#include "st21nfca_se.h" + +#define ST21NFCA_EVT_UICC_ACTIVATE 0x10 +#define ST21NFCA_EVT_UICC_DEACTIVATE 0x13 +#define ST21NFCA_EVT_SE_HARD_RESET 0x20 +#define ST21NFCA_EVT_SE_SOFT_RESET 0x11 +#define ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER 0x21 +#define ST21NFCA_EVT_SE_ACTIVATE 0x22 +#define ST21NFCA_EVT_SE_DEACTIVATE 0x23 + +#define ST21NFCA_EVT_TRANSMIT_DATA 0x10 +#define ST21NFCA_EVT_WTX_REQUEST 0x11 + +#define ST21NFCA_EVT_CONNECTIVITY 0x10 +#define ST21NFCA_EVT_TRANSACTION 0x12 + +#define ST21NFCA_ESE_HOST_ID 0xc0 + +#define ST21NFCA_SE_TO_HOT_PLUG 1000 +/* Connectivity pipe only */ +#define ST21NFCA_SE_COUNT_PIPE_UICC 0x01 +/* Connectivity + APDU Reader pipe */ +#define ST21NFCA_SE_COUNT_PIPE_EMBEDDED 0x02 + +#define ST21NFCA_SE_MODE_OFF 0x00 +#define ST21NFCA_SE_MODE_ON 0x01 + +#define ST21NFCA_PARAM_ATR 0x01 +#define ST21NFCA_ATR_DEFAULT_BWI 0x04 + +/* + * WT = 2^BWI/10[s], convert into msecs and add a secure + * room by increasing by 2 this timeout + */ +#define ST21NFCA_BWI_TO_TIMEOUT(x) ((1 << x) * 200) +#define ST21NFCA_ATR_GET_Y_FROM_TD(x) (x >> 4) + +/* If TA is present bit 0 is set */ +#define ST21NFCA_ATR_TA_PRESENT(x) (x & 0x01) +/* If TB is present bit 1 is set */ +#define ST21NFCA_ATR_TB_PRESENT(x) (x & 0x02) + +static u8 st21nfca_se_get_bwi(struct nfc_hci_dev *hdev) +{ + int i; + u8 td; + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */ + for (i = 1; i < ST21NFCA_ESE_MAX_LENGTH; i++) { + td = ST21NFCA_ATR_GET_Y_FROM_TD(info->se_info.atr[i]); + if (ST21NFCA_ATR_TA_PRESENT(td)) + i++; + if (ST21NFCA_ATR_TB_PRESENT(td)) { + i++; + return info->se_info.atr[i] >> 4; + } + } + return ST21NFCA_ATR_DEFAULT_BWI; +} + +static void st21nfca_se_get_atr(struct nfc_hci_dev *hdev) +{ + int r; + 
struct sk_buff *skb; + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + r = nfc_hci_get_param(hdev, ST21NFCA_APDU_READER_GATE, + ST21NFCA_PARAM_ATR, &skb); + if (r < 0) + return; + + if (skb->len <= ST21NFCA_ESE_MAX_LENGTH) { + memcpy(info->se_info.atr, skb->data, skb->len); + info->se_info.wt_timeout = + ST21NFCA_BWI_TO_TIMEOUT(st21nfca_se_get_bwi(hdev)); + } + kfree_skb(skb); +} + +static int st21nfca_hci_control_se(struct nfc_hci_dev *hdev, u32 se_idx, + u8 state) +{ + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + int r; + struct sk_buff *sk_host_list; + u8 se_event, host_id; + + switch (se_idx) { + case NFC_HCI_UICC_HOST_ID: + se_event = (state == ST21NFCA_SE_MODE_ON ? + ST21NFCA_EVT_UICC_ACTIVATE : + ST21NFCA_EVT_UICC_DEACTIVATE); + + info->se_info.count_pipes = 0; + info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_UICC; + break; + case ST21NFCA_ESE_HOST_ID: + se_event = (state == ST21NFCA_SE_MODE_ON ? + ST21NFCA_EVT_SE_ACTIVATE : + ST21NFCA_EVT_SE_DEACTIVATE); + + info->se_info.count_pipes = 0; + info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_EMBEDDED; + break; + default: + return -EINVAL; + } + + /* + * Wait for an EVT_HOT_PLUG in order to + * retrieve a relevant host list. + */ + reinit_completion(&info->se_info.req_completion); + r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, se_event, + NULL, 0); + if (r < 0) + return r; + + mod_timer(&info->se_info.se_active_timer, jiffies + + msecs_to_jiffies(ST21NFCA_SE_TO_HOT_PLUG)); + info->se_info.se_active = true; + + /* Ignore return value and check in any case the host_list */ + wait_for_completion_interruptible(&info->se_info.req_completion); + + r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE, + NFC_HCI_ADMIN_HOST_LIST, + &sk_host_list); + if (r < 0) + return r; + + host_id = sk_host_list->data[sk_host_list->len - 1]; + kfree_skb(sk_host_list); + + if (state == ST21NFCA_SE_MODE_ON && host_id == se_idx) + return se_idx; + else if (state == ST21NFCA_SE_MODE_OFF && host_id != se_idx) + return se_idx; + + return -1; +} + +int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev) +{ + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + int se_count = 0; + + if (info->se_status->is_uicc_present) { + nfc_add_se(hdev->ndev, NFC_HCI_UICC_HOST_ID, NFC_SE_UICC); + se_count++; + } + + if (info->se_status->is_ese_present) { + nfc_add_se(hdev->ndev, ST21NFCA_ESE_HOST_ID, NFC_SE_EMBEDDED); + se_count++; + } + + return !se_count; +} +EXPORT_SYMBOL(st21nfca_hci_discover_se); + +int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx) +{ + int r; + + /* + * According to upper layer, se_idx == NFC_SE_UICC when + * info->se_status->is_uicc_enable is true should never happen. + * Same for eSE. + */ + r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_ON); + + if (r == ST21NFCA_ESE_HOST_ID) { + st21nfca_se_get_atr(hdev); + r = nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE, + ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0); + if (r < 0) + return r; + } else if (r < 0) { + /* + * The activation tentative failed, the secure element + * is not connected. Remove from the list. + */ + nfc_remove_se(hdev->ndev, se_idx); + return r; + } + + return 0; +} +EXPORT_SYMBOL(st21nfca_hci_enable_se); + +int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx) +{ + int r; + + /* + * According to upper layer, se_idx == NFC_SE_UICC when + * info->se_status->is_uicc_enable is true should never happen + * Same for eSE. 
+ */ + r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_OFF); + if (r < 0) + return r; + + return 0; +} +EXPORT_SYMBOL(st21nfca_hci_disable_se); + +int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context) +{ + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + pr_debug("se_io %x\n", se_idx); + + switch (se_idx) { + case ST21NFCA_ESE_HOST_ID: + info->se_info.cb = cb; + info->se_info.cb_context = cb_context; + mod_timer(&info->se_info.bwi_timer, jiffies + + msecs_to_jiffies(info->se_info.wt_timeout)); + info->se_info.bwi_active = true; + return nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE, + ST21NFCA_EVT_TRANSMIT_DATA, + apdu, apdu_length); + default: + return -ENODEV; + } +} +EXPORT_SYMBOL(st21nfca_hci_se_io); + +static void st21nfca_se_wt_timeout(unsigned long data) +{ + /* + * No answer from the secure element + * within the defined timeout. + * Let's send a reset request as recovery procedure. + * According to the situation, we first try to send a software reset + * to the secure element. If the next command is still not + * answering in time, we send to the CLF a secure element hardware + * reset request. + */ + /* hardware reset managed through VCC_UICC_OUT power supply */ + u8 param = 0x01; + struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data; + + pr_debug("\n"); + + info->se_info.bwi_active = false; + + if (!info->se_info.xch_error) { + info->se_info.xch_error = true; + nfc_hci_send_event(info->hdev, ST21NFCA_APDU_READER_GATE, + ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0); + } else { + info->se_info.xch_error = false; + nfc_hci_send_event(info->hdev, ST21NFCA_DEVICE_MGNT_GATE, + ST21NFCA_EVT_SE_HARD_RESET, ¶m, 1); + } + info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); +} + +static void st21nfca_se_activation_timeout(unsigned long data) +{ + struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data; + + pr_debug("\n"); + + info->se_info.se_active = false; + + complete(&info->se_info.req_completion); +} + +/* + * Returns: + * <= 0: driver handled the event, skb consumed + * 1: driver does not handle the event, please do standard processing + */ +int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, + u8 event, struct sk_buff *skb) +{ + int r = 0; + + pr_debug("connectivity gate event: %x\n", event); + + switch (event) { + case ST21NFCA_EVT_CONNECTIVITY: + break; + case ST21NFCA_EVT_TRANSACTION: + break; + default: + return 1; + } + kfree_skb(skb); + return r; +} +EXPORT_SYMBOL(st21nfca_connectivity_event_received); + +int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev, + u8 event, struct sk_buff *skb) +{ + int r = 0; + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + pr_debug("apdu reader gate event: %x\n", event); + + switch (event) { + case ST21NFCA_EVT_TRANSMIT_DATA: + del_timer_sync(&info->se_info.bwi_timer); + info->se_info.bwi_active = false; + r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, + ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); + if (r < 0) + goto exit; + + info->se_info.cb(info->se_info.cb_context, + skb->data, skb->len, 0); + break; + case ST21NFCA_EVT_WTX_REQUEST: + mod_timer(&info->se_info.bwi_timer, jiffies + + msecs_to_jiffies(info->se_info.wt_timeout)); + break; + } + +exit: + kfree_skb(skb); + return r; +} +EXPORT_SYMBOL(st21nfca_apdu_reader_event_received); + +void st21nfca_se_init(struct nfc_hci_dev *hdev) +{ + struct st21nfca_hci_info *info = 
nfc_hci_get_clientdata(hdev); + + init_completion(&info->se_info.req_completion); + /* initialize timers */ + init_timer(&info->se_info.bwi_timer); + info->se_info.bwi_timer.data = (unsigned long)info; + info->se_info.bwi_timer.function = st21nfca_se_wt_timeout; + info->se_info.bwi_active = false; + + init_timer(&info->se_info.se_active_timer); + info->se_info.se_active_timer.data = (unsigned long)info; + info->se_info.se_active_timer.function = st21nfca_se_activation_timeout; + info->se_info.se_active = false; + + info->se_info.count_pipes = 0; + info->se_info.expected_pipes = 0; + + info->se_info.xch_error = false; + + info->se_info.wt_timeout = + ST21NFCA_BWI_TO_TIMEOUT(ST21NFCA_ATR_DEFAULT_BWI); +} +EXPORT_SYMBOL(st21nfca_se_init); + +void st21nfca_se_deinit(struct nfc_hci_dev *hdev) +{ + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + if (info->se_info.bwi_active) + del_timer_sync(&info->se_info.bwi_timer); + if (info->se_info.se_active) + del_timer_sync(&info->se_info.se_active_timer); + + info->se_info.bwi_active = false; + info->se_info.se_active = false; +} +EXPORT_SYMBOL(st21nfca_se_deinit); diff --git a/drivers/nfc/st21nfca/st21nfca_se.h b/drivers/nfc/st21nfca/st21nfca_se.h new file mode 100644 index 000000000000..b172cfcaeb90 --- /dev/null +++ b/drivers/nfc/st21nfca/st21nfca_se.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef __ST21NFCA_SE_H +#define __ST21NFCA_SE_H + +#include +#include + +/* + * ref ISO7816-3 chap 8.1. the initial character TS is followed by a + * sequence of at most 32 characters. 
+ */ +#define ST21NFCA_ESE_MAX_LENGTH 33 +#define ST21NFCA_ESE_HOST_ID 0xc0 + +struct st21nfca_se_info { + u8 atr[ST21NFCA_ESE_MAX_LENGTH]; + struct completion req_completion; + + struct timer_list bwi_timer; + int wt_timeout; /* in msecs */ + bool bwi_active; + + struct timer_list se_active_timer; + bool se_active; + int expected_pipes; + int count_pipes; + + bool xch_error; + + se_io_cb_t cb; + void *cb_context; +}; + +int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, + u8 event, struct sk_buff *skb); +int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev, + u8 event, struct sk_buff *skb); + +int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev); +int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx); +int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx); +int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); + +void st21nfca_se_init(struct nfc_hci_dev *hdev); +void st21nfca_se_deinit(struct nfc_hci_dev *hdev); +#endif /* __ST21NFCA_SE_H */ diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 5087fff96d86..cc2bdafb0c69 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h @@ -26,6 +26,8 @@ struct st21nfca_nfc_platform_data { unsigned int gpio_ena; unsigned int irq_polarity; + bool is_ese_present; + bool is_uicc_present; }; #endif /* _ST21NFCA_HCI_H_ */ -- cgit v1.2.3 From be6a6b43b597a37d96dbf74985f72045ccef0940 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 27 Jan 2015 15:57:59 +0200 Subject: net/mlx4_core: Add bad-cable event support If the firmware can detect a bad cable, allow it to generate an event, and print the problem in the log. Signed-off-by: Jack Morgenstein Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/eq.c | 22 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/fw.c | 9 ++++++++- include/linux/mlx4/device.h | 14 +++++++++++++- 3 files changed, 43 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2f2e6067426d..4df006d8afa4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev) u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); return async_ev_mask; } @@ -736,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) (unsigned long) eqe); break; + case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: + switch (eqe->subtype) { + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: + mlx4_warn(dev, "Bad cable detected on port %u\n", + eqe->event.bad_cable.port); + break; + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: + mlx4_warn(dev, "Unsupported cable detected\n"); + break; + default: + mlx4_dbg(dev, + "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? 
"HW" : "SW"); + break; + } + break; + case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 982861d1df44..2eadc2882e4f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -145,7 +145,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [16] = "CONFIG DEV support", [17] = "Asymmetric EQs support", [18] = "More than 80 VFs support", - [19] = "Performance optimized for limited rule configuration flow steering support" + [19] = "Performance optimized for limited rule configuration flow steering support", + [20] = "Recoverable error events support" }; int i; @@ -859,6 +860,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); if (field32 & (1 << 0)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; + if (field32 & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; @@ -1562,6 +1565,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_VXLAN_OFFSET 0x0c #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e #define INIT_HCA_FLAGS_OFFSET 0x014 +#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 #define INIT_HCA_QPC_OFFSET 0x020 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) @@ -1668,6 +1672,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) + *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 5ef54e145e4d..c95d659a39f2 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -200,7 +200,8 @@ enum { MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, - MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, + MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20 }; enum { @@ -280,6 +281,7 @@ enum mlx4_event { MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, + MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e, MLX4_EVENT_TYPE_NONE = 0xff, }; @@ -288,6 +290,11 @@ enum { MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4 }; +enum { + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1, + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2, +}; + enum { MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, }; @@ -860,6 +867,11 @@ struct mlx4_eqe { } __packed tbl_change_info; } params; } __packed port_mgmt_change; + struct { + u8 reserved[3]; + u8 port; + u32 reserved1[5]; + } __packed bad_cable; } event; u8 slave_id; u8 reserved3[2]; -- cgit v1.2.3 From 5a03108689c6f3e448a920b42af04e6d28401f80 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 27 Jan 2015 15:58:02 +0200 Subject: net/mlx4_core: Adjust command timeouts to conform to the firmware spec The firmware spec states that the timeout for all commands should be 60 seconds. 
In the past, the spec indicated that there were several classes of timeout (short, medium, and long). The driver has these different timeout classes. We leave the class differentiation in the driver as-is (to protect against any future spec changes), but set the timeout for all classes to be 60 seconds. In addition, we fix a few commands which had hard-coded numeric timeouts specified. Signed-off-by: Jack Morgenstein Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 19 ++++++++++--------- drivers/net/ethernet/mellanox/mlx4/mr.c | 4 ++-- include/linux/mlx4/cmd.h | 6 +++--- 3 files changed, 15 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2aa7c232d0b6..0c90d1072e47 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1774,8 +1774,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); } - err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, - MLX4_CMD_NATIVE); + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) mlx4_err(dev, "INIT_HCA returns %d\n", err); @@ -2029,7 +2029,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { if (priv->mfunc.master.init_port_ref[port] == 1) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, - 1000, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; } @@ -2040,7 +2040,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, if (!priv->mfunc.master.qp0_state[port].qp0_active && priv->mfunc.master.qp0_state[port].port_active) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, - 1000, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); @@ -2055,15 +2055,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) { - return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) { - return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, - MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } struct mlx4_config_dev { @@ -2202,7 +2202,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) int mlx4_NOP(struct mlx4_dev *dev) { /* Input modifier of 0x1f means "finish as soon as possible." 
*/ - return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); } int mlx4_get_phys_port_id(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 8dbdf1d29357..d21e884a0838 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -1155,7 +1155,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free); int mlx4_SYNC_TPT(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, - MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index c989442ffc6a..ae95adc78509 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -165,9 +165,9 @@ enum { }; enum { - MLX4_CMD_TIME_CLASS_A = 10000, - MLX4_CMD_TIME_CLASS_B = 10000, - MLX4_CMD_TIME_CLASS_C = 10000, + MLX4_CMD_TIME_CLASS_A = 60000, + MLX4_CMD_TIME_CLASS_B = 60000, + MLX4_CMD_TIME_CLASS_C = 60000, }; enum { -- cgit v1.2.3 From 3c313161353ad527de1a6ecde0f0d41700858fd6 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 24 Jan 2015 18:47:20 +0100 Subject: bcma: detect SPROM revision 11 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extracting values from it is still unsupported, but at least we'll display some meaningful error now. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/sprom.c | 3 ++- include/linux/ssb/ssb_regs.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index efb037f9c98a..206edd3ba668 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -579,7 +579,8 @@ int bcma_sprom_get(struct bcma_bus *bus) u16 offset = BCMA_CC_SPROM; u16 *sprom; size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, - SSB_SPROMSIZE_WORDS_R10, }; + SSB_SPROMSIZE_WORDS_R10, + SSB_SPROMSIZE_WORDS_R11, }; int i, err = 0; if (!bus->drv_cc.core) diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index f7b9100686c3..c0f707ac192b 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -173,6 +173,7 @@ #define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) #define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) #define SSB_SPROMSIZE_WORDS_R10 230 +#define SSB_SPROMSIZE_WORDS_R11 234 #define SSB_SPROM_BASE1 0x1000 #define SSB_SPROM_BASE31 0x0800 #define SSB_SPROM_REVISION 0x007E -- cgit v1.2.3 From b504075f5903b969a54ef3a6ae994c0872edb259 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sun, 25 Jan 2015 11:11:14 +0100 Subject: bcma: add early_init function for PCIe core and move some fix into it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are some PCIe core fixes that need to be applied before accessing SPROM, otherwise reading it may fail. 
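In rough outline, the ordering this patch establishes during bus registration is the following (a condensed, illustrative sketch of the bcma_bus_register() hunk below, with error handling omitted; bcma_sprom_get() is the existing SPROM reader):

	/* Sketch: the PCIe core's config fixes must run before SPROM access */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]); /* applies fixcfg */
	}
	/* ... flash-providing cores are brought up here ... */
	err = bcma_sprom_get(bus); /* only now is reading the SPROM safe */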
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_pci.c | 66 ++++++++++++++++++++++++------------ drivers/bcma/main.c | 7 ++++ include/linux/bcma/bcma_driver_pci.h | 2 ++ 3 files changed, 53 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index b85a505603e6..786666488a2d 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -144,6 +144,47 @@ static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device, return bcma_pcie_mdio_read(pc, device, address); } +/************************************************** + * Early init. + **************************************************/ + +static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc) +{ + struct bcma_device *core = pc->core; + u16 val16, core_index; + uint regoff; + + regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET); + core_index = (u16)core->core_index; + + val16 = pcicore_read16(pc, regoff); + if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT) + != core_index) { + val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) | + (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK); + pcicore_write16(pc, regoff, val16); + } +} + +/* + * Apply some early fixes required before accessing SPROM. + * See also si_pci_fixcfg. + */ +void bcma_core_pci_early_init(struct bcma_drv_pci *pc) +{ + if (pc->early_setup_done) + return; + + pc->hostmode = bcma_core_pci_is_in_hostmode(pc); + if (pc->hostmode) + goto out; + + bcma_core_pci_fixcfg(pc); + +out: + pc->early_setup_done = true; +} + /************************************************** * Workarounds. **************************************************/ @@ -175,24 +216,6 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc) tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN); } -static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc) -{ - struct bcma_device *core = pc->core; - u16 val16, core_index; - uint regoff; - - regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET); - core_index = (u16)core->core_index; - - val16 = pcicore_read16(pc, regoff); - if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT) - != core_index) { - val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) | - (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK); - pcicore_write16(pc, regoff, val16); - } -} - /* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */ /* Needs to happen when coming out of 'standby'/'hibernate' */ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) @@ -216,7 +239,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc) { - bcma_core_pci_fixcfg(pc); bcma_pcicore_serdes_workaround(pc); bcma_core_pci_config_fixup(pc); } @@ -226,11 +248,11 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc) if (pc->setup_done) return; - pc->hostmode = bcma_core_pci_is_in_hostmode(pc); + bcma_core_pci_early_init(pc); + if (pc->hostmode) bcma_core_pci_hostmode_init(pc); - - if (!pc->hostmode) + else bcma_core_pci_clientmode_init(pc); } diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 73b2ee3de972..38bde6eab8a4 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -402,6 +402,13 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_chipcommon_early_init(&bus->drv_cc); } + /* Early init PCIE core */ + core = bcma_find_core(bus, BCMA_CORE_PCIE); + if (core) { + bus->drv_pci[0].core = core; + 
bcma_core_pci_early_init(&bus->drv_pci[0]); + } + /* Cores providing flash access go before SPROM init */ list_for_each_entry(core, &bus->cores, list) { if (bcma_is_core_needed_early(core->id.id)) diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 0333e605ea0d..3f809ae372c4 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -223,6 +223,7 @@ struct bcma_drv_pci_host { struct bcma_drv_pci { struct bcma_device *core; + u8 early_setup_done:1; u8 setup_done:1; u8 hostmode:1; @@ -237,6 +238,7 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) +extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc); extern void bcma_core_pci_init(struct bcma_drv_pci *pc); extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, bool enable); -- cgit v1.2.3 From 8be08a39d498d5d93ff5149276e34ccb4ec3757f Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sun, 25 Jan 2015 13:41:19 +0100 Subject: bcma: implement host code support for PCIe Gen 2 devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is still incomplete, so we don't add PCI IDs of new devices yet. The purpose of this patch is to allow testing & adjusting the rest of the code. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/host_pci.c | 6 ++++-- include/linux/bcma/bcma.h | 1 + include/linux/bcma/bcma_regs.h | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index cd9161a8b3a1..53c6a8a58859 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -13,10 +13,12 @@ static void bcma_host_pci_switch_core(struct bcma_device *core) { + int win2 = core->bus->host_is_pcie2 ? + BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2; + pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN, core->addr); - pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2, - core->wrap); + pci_write_config_dword(core->bus->host_pci, win2, core->wrap); core->bus->mapped_core = core; bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id); } diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index eb1c6a47b67f..994739da827f 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -318,6 +318,7 @@ struct bcma_bus { const struct bcma_host_ops *ops; enum bcma_hosttype hosttype; + bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ union { /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ struct pci_dev *host_pci; diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index e64ae7bf80a1..ebd5c1fcdea4 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -64,6 +64,8 @@ #define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ #define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ +#define BCMA_PCIE2_BAR0_WIN2 0x70 + /* SiliconBackplane Address Map. * All regions may not exist on all chips.
*/ -- cgit v1.2.3 From 7866a621043fbaca3d7389e9b9f69dd1a2e5a855 Mon Sep 17 00:00:00 2001 From: Salam Noureddine Date: Tue, 27 Jan 2015 11:35:48 -0800 Subject: dev: add per net_device packet type chains When many pf_packet listeners are created on a lot of interfaces the current implementation using global packet type lists scales poorly. This patch adds per net_device packet type lists to fix this problem. The patch was originally written by Eric Biederman for linux-2.6.29. Tested on linux-3.16. Signed-off-by: "Eric W. Biederman" Signed-off-by: Salam Noureddine Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 + net/core/dev.c | 132 +++++++++++++++++++++++++++++----------------- 2 files changed, 86 insertions(+), 48 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 642d426a668f..3d37c6eb1732 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1514,6 +1514,8 @@ struct net_device { struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; struct { struct list_head upper; diff --git a/net/core/dev.c b/net/core/dev.c index 7f028d441e98..1d564d68e31a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -371,9 +371,10 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev) static inline struct list_head *ptype_head(const struct packet_type *pt) { if (pt->type == htons(ETH_P_ALL)) - return &ptype_all; + return pt->dev ? &pt->dev->ptype_all : &ptype_all; else - return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; + return pt->dev ? &pt->dev->ptype_specific : + &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; } /** @@ -1734,6 +1735,23 @@ static inline int deliver_skb(struct sk_buff *skb, return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } +static inline void deliver_ptype_list_skb(struct sk_buff *skb, + struct packet_type **pt, + struct net_device *dev, __be16 type, + struct list_head *ptype_list) +{ + struct packet_type *ptype, *pt_prev = *pt; + + list_for_each_entry_rcu(ptype, ptype_list, list) { + if (ptype->type != type) + continue; + if (pt_prev) + deliver_skb(skb, pt_prev, dev); + pt_prev = ptype; + } + *pt = pt_prev; +} + static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) { if (!ptype->af_packet_priv || !skb->sk) @@ -1757,45 +1775,54 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) struct packet_type *ptype; struct sk_buff *skb2 = NULL; struct packet_type *pt_prev = NULL; + struct list_head *ptype_list = &ptype_all; rcu_read_lock(); - list_for_each_entry_rcu(ptype, &ptype_all, list) { +again: + list_for_each_entry_rcu(ptype, ptype_list, list) { /* Never send packets back to the socket * they originated from - MvS (miquels@drinkel.ow.org) */ - if ((ptype->dev == dev || !ptype->dev) && - (!skb_loop_sk(ptype, skb))) { - if (pt_prev) { - deliver_skb(skb2, pt_prev, skb->dev); - pt_prev = ptype; - continue; - } + if (skb_loop_sk(ptype, skb)) + continue; - skb2 = skb_clone(skb, GFP_ATOMIC); - if (!skb2) - break; + if (pt_prev) { + deliver_skb(skb2, pt_prev, skb->dev); + pt_prev = ptype; + continue; + } - net_timestamp_set(skb2); + /* need to clone skb, done only once */ + skb2 = skb_clone(skb, GFP_ATOMIC); + if (!skb2) + goto out_unlock; - /* skb->nh should be correctly - set by sender, so that the second statement is - just protection against buggy protocols. 
- */ - skb_reset_mac_header(skb2); - - if (skb_network_header(skb2) < skb2->data || - skb_network_header(skb2) > skb_tail_pointer(skb2)) { - net_crit_ratelimited("protocol %04x is buggy, dev %s\n", - ntohs(skb2->protocol), - dev->name); - skb_reset_network_header(skb2); - } + net_timestamp_set(skb2); - skb2->transport_header = skb2->network_header; - skb2->pkt_type = PACKET_OUTGOING; - pt_prev = ptype; + /* skb->nh should be correctly + * set by sender, so that the second statement is + * just protection against buggy protocols. + */ + skb_reset_mac_header(skb2); + + if (skb_network_header(skb2) < skb2->data || + skb_network_header(skb2) > skb_tail_pointer(skb2)) { + net_crit_ratelimited("protocol %04x is buggy, dev %s\n", + ntohs(skb2->protocol), + dev->name); + skb_reset_network_header(skb2); } + + skb2->transport_header = skb2->network_header; + skb2->pkt_type = PACKET_OUTGOING; + pt_prev = ptype; + } + + if (ptype_list == &ptype_all) { + ptype_list = &dev->ptype_all; + goto again; } +out_unlock: if (pt_prev) pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); rcu_read_unlock(); @@ -2617,7 +2644,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev, unsigned int len; int rc; - if (!list_empty(&ptype_all)) + if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) dev_queue_xmit_nit(skb, dev); len = skb->len; @@ -3615,7 +3642,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) struct packet_type *ptype, *pt_prev; rx_handler_func_t *rx_handler; struct net_device *orig_dev; - struct net_device *null_or_dev; bool deliver_exact = false; int ret = NET_RX_DROP; __be16 type; @@ -3658,11 +3684,15 @@ another_round: goto skip_taps; list_for_each_entry_rcu(ptype, &ptype_all, list) { - if (!ptype->dev || ptype->dev == skb->dev) { - if (pt_prev) - ret = deliver_skb(skb, pt_prev, orig_dev); - pt_prev = ptype; - } + if (pt_prev) + ret = deliver_skb(skb, pt_prev, orig_dev); + pt_prev = ptype; + } + + list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { + if (pt_prev) + ret = deliver_skb(skb, pt_prev, orig_dev); + pt_prev = ptype; } skip_taps: @@ -3718,19 +3748,21 @@ ncls: skb->vlan_tci = 0; } + type = skb->protocol; + /* deliver only exact match when indicated */ - null_or_dev = deliver_exact ? 
skb->dev : NULL; + if (likely(!deliver_exact)) { + deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, + &ptype_base[ntohs(type) & + PTYPE_HASH_MASK]); + } - type = skb->protocol; - list_for_each_entry_rcu(ptype, - &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { - if (ptype->type == type && - (ptype->dev == null_or_dev || ptype->dev == skb->dev || - ptype->dev == orig_dev)) { - if (pt_prev) - ret = deliver_skb(skb, pt_prev, orig_dev); - pt_prev = ptype; - } + deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, + &orig_dev->ptype_specific); + + if (unlikely(skb->dev != orig_dev)) { + deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, + &skb->dev->ptype_specific); } if (pt_prev) { @@ -6579,6 +6611,8 @@ void netdev_run_todo(void) /* paranoia */ BUG_ON(netdev_refcnt_read(dev)); + BUG_ON(!list_empty(&dev->ptype_all)); + BUG_ON(!list_empty(&dev->ptype_specific)); WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr); @@ -6761,6 +6795,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, INIT_LIST_HEAD(&dev->adj_list.lower); INIT_LIST_HEAD(&dev->all_adj_list.upper); INIT_LIST_HEAD(&dev->all_adj_list.lower); + INIT_LIST_HEAD(&dev->ptype_all); + INIT_LIST_HEAD(&dev->ptype_specific); dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); -- cgit v1.2.3 From aafb3e98b27977148c8c86499684f8f5c3decfbb Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Thu, 29 Jan 2015 22:40:11 -0800 Subject: netdev: introduce new NETIF_F_HW_SWITCH_OFFLOAD feature flag for switch device offloads This is a high level feature flag for all switch ASIC offloads; switch drivers set this flag on switch ports. Logical devices like bridges, bonds, and vxlans can inherit this flag from their slaves/ports. The patch also adds the flag to NETIF_F_ONE_FOR_ALL, so that it gets propagated to the upper devices (bridges and bonds). Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 8e30685affeb..7d59dc6ab789 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -66,6 +66,7 @@ enum { NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ NETIF_F_BUSY_POLL_BIT, /* Busy poll */ + NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */ /* * Add your fresh new feature above and remember to update @@ -124,6 +125,7 @@ enum { #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) +#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ @@ -159,7 +161,9 @@ enum { */ #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ NETIF_F_SG | NETIF_F_HIGHDMA | \ - NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) + NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \ + NETIF_F_HW_SWITCH_OFFLOAD) + /* * If one device doesn't support one of these features, then disable it * for all in netdev_increment_features.
-- cgit v1.2.3 From add511b38266aa10c1079f9248854e6a415c4dc2 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Thu, 29 Jan 2015 22:40:12 -0800 Subject: bridge: add flags argument to ndo_bridge_setlink and ndo_bridge_dellink Bridge flags are needed inside the ndo_bridge_setlink/dellink handlers to avoid another call to parse IFLA_AF_SPEC inside these handlers. This is used later in this series. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/rocker/rocker.c | 2 +- include/linux/netdevice.h | 6 ++++-- net/bridge/br_netlink.c | 4 ++-- net/bridge/br_private.h | 4 ++-- net/core/rtnetlink.c | 10 ++++++---- 7 files changed, 18 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 598c5070c629..efed92c7b731 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4327,7 +4327,8 @@ fw_exit: return status; } -static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) +static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags) { struct be_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7bb421bfd84e..e4086fea4be2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7786,7 +7786,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, u16 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 11f4ffcc113d..f0d607ca5e73 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3722,7 +3722,7 @@ skip: } static int rocker_port_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, u16 flags) { struct rocker_port *rocker_port = netdev_priv(dev); struct nlattr *protinfo; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3d37c6eb1732..16251e96e6aa 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1154,13 +1154,15 @@ struct net_device_ops { int idx); int (*ndo_bridge_setlink)(struct net_device *dev, - struct nlmsghdr *nlh); + struct nlmsghdr *nlh, + u16 flags); int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask); int (*ndo_bridge_dellink)(struct net_device *dev, - struct nlmsghdr *nlh); + struct nlmsghdr *nlh, + u16 flags); int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e08b260f33fe..088e80203845 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -494,7 +494,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) } /* Change state and parameters on port.
*/ -int br_setlink(struct net_device *dev, struct nlmsghdr *nlh) +int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { struct nlattr *protinfo; struct nlattr *afspec; @@ -550,7 +550,7 @@ out: } /* Delete port information */ -int br_dellink(struct net_device *dev, struct nlmsghdr *nlh) +int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { struct nlattr *afspec; struct net_bridge_port *p; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e8e3f3681680..de0919975a25 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -819,8 +819,8 @@ extern struct rtnl_link_ops br_link_ops; int br_netlink_init(void); void br_netlink_fini(void); void br_ifinfo_notify(int event, struct net_bridge_port *port); -int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg); -int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg); +int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); +int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fedd7ab4085a..673cb4c6f391 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2991,7 +2991,7 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) goto out; } - err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh); + err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags); if (err) goto out; @@ -3002,7 +3002,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) if (!dev->netdev_ops->ndo_bridge_setlink) err = -EOPNOTSUPP; else - err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh); + err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, + flags); if (!err) { flags &= ~BRIDGE_FLAGS_SELF; @@ -3064,7 +3065,7 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) goto out; } - err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh); + err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); if (err) goto out; @@ -3075,7 +3076,8 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) if (!dev->netdev_ops->ndo_bridge_dellink) err = -EOPNOTSUPP; else - err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh); + err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, + flags); if (!err) { flags &= ~BRIDGE_FLAGS_SELF; -- cgit v1.2.3 From 366e41d9774d7010cb63112b6db2fce6dc7809c0 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Sat, 31 Jan 2015 10:40:13 -0500 Subject: ipv6: pull cork initialization into its own function. Pull IPv6 cork initialization into its own function that can be re-used. IPv6 specific cork data did not have an explicit data structure. This patch creates one so that just the IPv6 cork data can be passed as arguments. Also, since IPv6 tries to save the flow label into the inet_cork_full structure, pass the full cork. Adjust ip6_cork_release() to take cork data structures. Signed-off-by: Vladislav Yasevich Signed-off-by: David S.
Miller --- include/linux/ipv6.h | 12 ++-- net/ipv6/ip6_output.c | 158 ++++++++++++++++++++++++++++---------------------- 2 files changed, 96 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 2805062c013f..4d5169f5d7d1 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -125,6 +125,12 @@ struct ipv6_mc_socklist; struct ipv6_ac_socklist; struct ipv6_fl_socklist; +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + /** * struct ipv6_pinfo - ipv6 private area * @@ -217,11 +223,7 @@ struct ipv6_pinfo { struct ipv6_txoptions *opt; struct sk_buff *pktoptions; struct sk_buff *rxpmtu; - struct { - struct ipv6_txoptions *opt; - u8 hop_limit; - u8 tclass; - } cork; + struct inet6_cork cork; }; /* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */ diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ce69a12ae48c..f9f08c43c6e1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1135,6 +1135,74 @@ static void ip6_append_data_mtu(unsigned int *mtu, } } +static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, + struct inet6_cork *v6_cork, + int hlimit, int tclass, struct ipv6_txoptions *opt, + struct rt6_info *rt, struct flowi6 *fl6) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + unsigned int mtu; + + /* + * setup for corking + */ + if (opt) { + if (WARN_ON(v6_cork->opt)) + return -EINVAL; + + v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); + if (unlikely(v6_cork->opt == NULL)) + return -ENOBUFS; + + v6_cork->opt->tot_len = opt->tot_len; + v6_cork->opt->opt_flen = opt->opt_flen; + v6_cork->opt->opt_nflen = opt->opt_nflen; + + v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt, + sk->sk_allocation); + if (opt->dst0opt && !v6_cork->opt->dst0opt) + return -ENOBUFS; + + v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt, + sk->sk_allocation); + if (opt->dst1opt && !v6_cork->opt->dst1opt) + return -ENOBUFS; + + v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt, + sk->sk_allocation); + if (opt->hopopt && !v6_cork->opt->hopopt) + return -ENOBUFS; + + v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt, + sk->sk_allocation); + if (opt->srcrt && !v6_cork->opt->srcrt) + return -ENOBUFS; + + /* need source address above miyazawa*/ + } + dst_hold(&rt->dst); + cork->base.dst = &rt->dst; + cork->fl.u.ip6 = *fl6; + v6_cork->hop_limit = hlimit; + v6_cork->tclass = tclass; + if (rt->dst.flags & DST_XFRM_TUNNEL) + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? + rt->dst.dev->mtu : dst_mtu(&rt->dst); + else + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path); + if (np->frag_size < mtu) { + if (np->frag_size) + mtu = np->frag_size; + } + cork->base.fragsize = mtu; + if (dst_allfrag(rt->dst.path)) + cork->base.flags |= IPCORK_ALLFRAG; + cork->base.length = 0; + + return 0; +} + int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, @@ -1162,59 +1230,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, /* * setup for corking */ - if (opt) { - if (WARN_ON(np->cork.opt)) - return -EINVAL; - - np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation); - if (unlikely(np->cork.opt == NULL)) - return -ENOBUFS; - - np->cork.opt->tot_len = opt->tot_len; - np->cork.opt->opt_flen = opt->opt_flen; - np->cork.opt->opt_nflen = opt->opt_nflen; - - np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt, - sk->sk_allocation); - if (opt->dst0opt && !np->cork.opt->dst0opt) - return -ENOBUFS; - - np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt, - sk->sk_allocation); - if (opt->dst1opt && !np->cork.opt->dst1opt) - return -ENOBUFS; - - np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt, - sk->sk_allocation); - if (opt->hopopt && !np->cork.opt->hopopt) - return -ENOBUFS; - - np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt, - sk->sk_allocation); - if (opt->srcrt && !np->cork.opt->srcrt) - return -ENOBUFS; - - /* need source address above miyazawa*/ - } - dst_hold(&rt->dst); - cork->dst = &rt->dst; - inet->cork.fl.u.ip6 = *fl6; - np->cork.hop_limit = hlimit; - np->cork.tclass = tclass; - if (rt->dst.flags & DST_XFRM_TUNNEL) - mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? - rt->dst.dev->mtu : dst_mtu(&rt->dst); - else - mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? - rt->dst.dev->mtu : dst_mtu(rt->dst.path); - if (np->frag_size < mtu) { - if (np->frag_size) - mtu = np->frag_size; - } - cork->fragsize = mtu; - if (dst_allfrag(rt->dst.path)) - cork->flags |= IPCORK_ALLFRAG; - cork->length = 0; + err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit, + tclass, opt, rt, fl6); + if (err) + return err; exthdrlen = (opt ? 
opt->opt_flen : 0); length += exthdrlen; transhdrlen += exthdrlen; @@ -1226,8 +1245,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; - mtu = cork->fragsize; } + mtu = cork->fragsize; orig_mtu = mtu; hh_len = LL_RESERVED_SPACE(rt->dst.dev); @@ -1503,23 +1522,24 @@ error: } EXPORT_SYMBOL_GPL(ip6_append_data); -static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) +static void ip6_cork_release(struct inet_cork_full *cork, + struct inet6_cork *v6_cork) { - if (np->cork.opt) { - kfree(np->cork.opt->dst0opt); - kfree(np->cork.opt->dst1opt); - kfree(np->cork.opt->hopopt); - kfree(np->cork.opt->srcrt); - kfree(np->cork.opt); - np->cork.opt = NULL; + if (v6_cork->opt) { + kfree(v6_cork->opt->dst0opt); + kfree(v6_cork->opt->dst1opt); + kfree(v6_cork->opt->hopopt); + kfree(v6_cork->opt->srcrt); + kfree(v6_cork->opt); + v6_cork->opt = NULL; } - if (inet->cork.base.dst) { - dst_release(inet->cork.base.dst); - inet->cork.base.dst = NULL; - inet->cork.base.flags &= ~IPCORK_ALLFRAG; + if (cork->base.dst) { + dst_release(cork->base.dst); + cork->base.dst = NULL; + cork->base.flags &= ~IPCORK_ALLFRAG; } - memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); + memset(&cork->fl, 0, sizeof(cork->fl)); } int ip6_push_pending_frames(struct sock *sk) @@ -1599,7 +1619,7 @@ int ip6_push_pending_frames(struct sock *sk) } out: - ip6_cork_release(inet, np); + ip6_cork_release(&inet->cork, &np->cork); return err; error: IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); @@ -1618,6 +1638,6 @@ void ip6_flush_pending_frames(struct sock *sk) kfree_skb(skb); } - ip6_cork_release(inet_sk(sk), inet6_sk(sk)); + ip6_cork_release(&inet_sk(sk)->cork, &inet6_sk(sk)->cork); } EXPORT_SYMBOL_GPL(ip6_flush_pending_frames); -- cgit v1.2.3 From 4c946d9c11d173c2ea6b9081b248f8072e6b46f1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 27 Nov 2014 19:52:04 -0500 Subject: vmci: propagate msghdr all way down to __qp_memcpy_to_queue() Switch from passing msg->iov_iter.iov to passing msg itself Signed-off-by: Al Viro --- drivers/misc/vmw_vmci/vmci_queue_pair.c | 16 ++++++++-------- include/linux/vmw_vmci_api.h | 2 +- net/vmw_vsock/vmci_transport.c | 3 +-- 3 files changed, 10 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 7aaaf51e1596..35f19a683822 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue, to_copy = size - bytes_copied; if (is_iovec) { - struct iovec *iov = (struct iovec *)src; + struct msghdr *msg = (struct msghdr *)src; int err; /* The iovec will track bytes_copied internally. */ - err = memcpy_fromiovec((u8 *)va + page_offset, - iov, to_copy); + err = memcpy_from_msg((u8 *)va + page_offset, + msg, to_copy); if (err != 0) { if (kernel_if->host) kunmap(kernel_if->u.h.page[page_index]); @@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest, */ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, u64 queue_offset, - const void *src, + const void *msg, size_t src_offset, size_t size) { @@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, * We ignore src_offset because src is really a struct iovec * and will * maintain offset internally. 
*/ - return __qp_memcpy_to_queue(queue, queue_offset, src, size, true); + return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true); } /* @@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek); * of bytes enqueued or < 0 on error. */ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, - void *iov, + struct msghdr *msg, size_t iov_size, int buf_type) { ssize_t result; - if (!qpair || !iov) + if (!qpair) return VMCI_ERROR_INVALID_ARGS; qp_lock(qpair); @@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, result = qp_enqueue_locked(qpair->produce_q, qpair->consume_q, qpair->produce_q_size, - iov, iov_size, + msg, iov_size, qp_memcpy_to_queue_iov); if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 5691f752ce8f..63df3a2a8ce5 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h @@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, int mode); ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, - void *iov, size_t iov_size, int mode); + struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 02d2e5229240..7f3255084a6c 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1850,8 +1850,7 @@ static ssize_t vmci_transport_stream_enqueue( struct msghdr *msg, size_t len) { - /* XXX: stripping const */ - return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0); + return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0); } static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk) -- cgit v1.2.3 From af2b040e470b470bfc881981db3c796072853eae Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 27 Nov 2014 21:44:24 -0500 Subject: rxrpc: switch rxrpc_send_data() to iov_iter primitives Convert skb_add_data() to iov_iter; this allows us to get rid of the explicit messing with iovec in its only caller - skb_add_data() will keep advancing ->msg_iter for us, so there's no need to simulate that manually.
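As a minimal illustration of the resulting pattern (a hypothetical, simplified helper; the real rxrpc_send_data() below additionally handles call aborts, padding and packet queueing):

	/* Sketch: fill one skb from a msghdr.  msg->msg_iter advances itself,
	 * so none of the old iov/iov_base/segment arithmetic is needed.  A
	 * real caller loops, allocating a fresh skb whenever tailroom runs
	 * out, until iov_iter_count() reaches zero.
	 */
	static int fill_skb_sketch(struct sk_buff *skb, struct msghdr *msg)
	{
		int copy = min_t(int, skb_tailroom(skb),
				 iov_iter_count(&msg->msg_iter));

		return skb_add_data(skb, &msg->msg_iter, copy); /* 0 or -EFAULT */
	}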
Signed-off-by: Al Viro --- include/linux/skbuff.h | 11 +++++------ net/rxrpc/ar-output.c | 43 ++++++++++--------------------------------- 2 files changed, 15 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 85ab7d72b54c..9a8bafee1b67 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2484,19 +2484,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) } static inline int skb_add_data(struct sk_buff *skb, - char __user *from, int copy) + struct iov_iter *from, int copy) { const int off = skb->len; if (skb->ip_summed == CHECKSUM_NONE) { - int err = 0; - __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), - copy, 0, &err); - if (!err) { + __wsum csum = 0; + if (csum_and_copy_from_iter(skb_put(skb, copy), copy, + &csum, from) == copy) { skb->csum = csum_block_add(skb->csum, csum, off); return 0; } - } else if (!copy_from_user(skb_put(skb, copy), from, copy)) + } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) return 0; __skb_trim(skb, off); diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index e1a9373e5979..963a5b91f3e8 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -529,13 +529,11 @@ static int rxrpc_send_data(struct kiocb *iocb, struct msghdr *msg, size_t len) { struct rxrpc_skb_priv *sp; - unsigned char __user *from; struct sk_buff *skb; - const struct iovec *iov; struct sock *sk = &rx->sk; long timeo; bool more; - int ret, ioc, segment, copied; + int ret, copied; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); @@ -545,25 +543,17 @@ static int rxrpc_send_data(struct kiocb *iocb, if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) return -EPIPE; - iov = msg->msg_iter.iov; - ioc = msg->msg_iter.nr_segs - 1; - from = iov->iov_base; - segment = iov->iov_len; - iov++; more = msg->msg_flags & MSG_MORE; skb = call->tx_pending; call->tx_pending = NULL; copied = 0; - do { + if (len > iov_iter_count(&msg->msg_iter)) + len = iov_iter_count(&msg->msg_iter); + while (len) { int copy; - if (segment > len) - segment = len; - - _debug("SEGMENT %d @%p", segment, from); - if (!skb) { size_t size, chunk, max, space; @@ -631,13 +621,13 @@ static int rxrpc_send_data(struct kiocb *iocb, /* append next segment of data to the current buffer */ copy = skb_tailroom(skb); ASSERTCMP(copy, >, 0); - if (copy > segment) - copy = segment; + if (copy > len) + copy = len; if (copy > sp->remain) copy = sp->remain; _debug("add"); - ret = skb_add_data(skb, from, copy); + ret = skb_add_data(skb, &msg->msg_iter, copy); _debug("added"); if (ret < 0) goto efault; @@ -646,18 +636,6 @@ static int rxrpc_send_data(struct kiocb *iocb, copied += copy; len -= copy; - segment -= copy; - from += copy; - while (segment == 0 && ioc > 0) { - from = iov->iov_base; - segment = iov->iov_len; - iov++; - ioc--; - } - if (len == 0) { - segment = 0; - ioc = 0; - } /* check for the far side aborting the call or a network error * occurring */ @@ -665,7 +643,7 @@ static int rxrpc_send_data(struct kiocb *iocb, goto call_aborted; /* add the packet to the send queue if it's now full */ - if (sp->remain <= 0 || (segment == 0 && !more)) { + if (sp->remain <= 0 || (!len && !more)) { struct rxrpc_connection *conn = call->conn; uint32_t seq; size_t pad; @@ -711,11 +689,10 @@ static int rxrpc_send_data(struct kiocb *iocb, memcpy(skb->head, &sp->hdr, sizeof(struct rxrpc_header)); - rxrpc_queue_packet(call, skb, segment == 0 && !more); + rxrpc_queue_packet(call, skb, 
!iov_iter_count(&msg->msg_iter) && !more); skb = NULL; } - - } while (segment > 0); + } success: ret = copied; -- cgit v1.2.3 From 21226abb4e9f14d88238964d89b279e461ddc30c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 28 Nov 2014 15:48:29 -0500 Subject: net: switch memcpy_fromiovec()/memcpy_fromiovecend() users to copy_from_iter() That takes care of the majority of ->sendmsg() instances - most of them via memcpy_to_msg() or assorted getfrag() callbacks. One place where we still keep memcpy_fromiovecend() is tipc - there we potentially read the same data over and over; separate patch, that... Signed-off-by: Al Viro --- include/linux/skbuff.h | 3 +-- include/net/udplite.h | 3 +-- net/ipv4/ip_output.c | 6 ++---- net/ipv4/ping.c | 14 +++++++------- net/ipv4/raw.c | 2 +- net/ipv4/tcp_input.c | 2 +- net/ipv6/raw.c | 2 +- 7 files changed, 14 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9a8bafee1b67..b349c96dc80a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2692,8 +2692,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { - /* XXX: stripping const */ - return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); + return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; } static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) diff --git a/include/net/udplite.h b/include/net/udplite.h index ae7c8d1fbcad..80761938b9a7 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -20,8 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct msghdr *msg = from; - /* XXX: stripping const */ - return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len); + return copy_from_iter(to, len, &msg->msg_iter) != len ? 
-EFAULT : 0; } /* Designate sk as UDP-Lite socket */ diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b50861b22b6b..f998bc87ae38 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -755,13 +755,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk struct msghdr *msg = from; if (skb->ip_summed == CHECKSUM_PARTIAL) { - /* XXX: stripping const */ - if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0) + if (copy_from_iter(to, len, &msg->msg_iter) != len) return -EFAULT; } else { __wsum csum = 0; - /* XXX: stripping const */ - if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0) + if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len) return -EFAULT; skb->csum = csum_block_add(skb->csum, csum, odd); } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 9e15ba701401..e9f66e1cda50 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -599,18 +599,18 @@ int ping_getfrag(void *from, char *to, struct pingfakehdr *pfh = (struct pingfakehdr *)from; if (offset == 0) { - if (fraglen < sizeof(struct icmphdr)) + fraglen -= sizeof(struct icmphdr); + if (fraglen < 0) BUG(); - if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr), - pfh->msg->msg_iter.iov, 0, fraglen - sizeof(struct icmphdr), - &pfh->wcheck)) + if (csum_and_copy_from_iter(to + sizeof(struct icmphdr), + fraglen, &pfh->wcheck, + &pfh->msg->msg_iter) != fraglen) return -EFAULT; } else if (offset < sizeof(struct icmphdr)) { BUG(); } else { - if (csum_partial_copy_fromiovecend - (to, pfh->msg->msg_iter.iov, offset - sizeof(struct icmphdr), - fraglen, &pfh->wcheck)) + if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck, + &pfh->msg->msg_iter) != fraglen) return -EFAULT; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2c9d252072a2..f027a708b7e0 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -382,7 +382,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->transport_header = skb->network_header; err = -EFAULT; - if (memcpy_fromiovecend((void *)iph, msg->msg_iter.iov, 0, length)) + if (memcpy_from_msg(iph, msg, length)) goto error_free; iphlen = iph->ihl * 4; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 93c74829cbce..71fb37c70581 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4368,7 +4368,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; - if (copy_from_iter(skb_put(skb, size), size, &msg->msg_iter) != size) + if (memcpy_from_msg(skb_put(skb, size), msg, size)) goto err_free; TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 0dbb328fa688..dae7f1a1e464 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -648,7 +648,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb->ip_summed = CHECKSUM_NONE; skb->transport_header = skb->network_header; - err = memcpy_fromiovecend((void *)iph, msg->msg_iter.iov, 0, length); + err = memcpy_from_msg(iph, msg, length); if (err) goto error_fault; -- cgit v1.2.3 From 31a25fae85956e3a9c778141d29e5e803fb0b124 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 28 Nov 2014 15:53:57 -0500 Subject: net: bury net/core/iovec.c - nothing in there is used anymore Signed-off-by: Al Viro --- include/linux/socket.h | 7 --- net/core/Makefile | 2 +- net/core/iovec.c | 137 ------------------------------------------------- 3 files changed, 1 
insertion(+), 145 deletions(-) delete mode 100644 net/core/iovec.c (limited to 'include/linux') diff --git a/include/linux/socket.h b/include/linux/socket.h index 6e49a14365dc..5c19cba34dce 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -318,13 +318,6 @@ struct ucred { /* IPX options */ #define IPX_TYPE 1 -extern int csum_partial_copy_fromiovecend(unsigned char *kdata, - struct iovec *iov, - int offset, - unsigned int len, __wsum *csump); -extern unsigned long iov_pages(const struct iovec *iov, int offset, - unsigned long nr_segs); - extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); diff --git a/net/core/Makefile b/net/core/Makefile index 235e6c50708d..fec0856dd6c0 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -2,7 +2,7 @@ # Makefile for the Linux networking core. # -obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ +obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o obj-$(CONFIG_SYSCTL) += sysctl_net_core.o diff --git a/net/core/iovec.c b/net/core/iovec.c deleted file mode 100644 index dcbe98b3726a..000000000000 --- a/net/core/iovec.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * iovec manipulation routines. - * - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Fixes: - * Andrew Lunn : Errors in iovec copying. - * Pedro Roque : Added memcpy_fromiovecend and - * csum_..._fromiovecend. - * Andi Kleen : fixed error handling for 2.1 - * Alexey Kuznetsov: 2.1 optimisations - * Andi Kleen : Fix csum*fromiovecend for IPv6. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * And now for the all-in-one: copy and checksum from a user iovec - * directly to a datagram - * Calls to csum_partial but the last must be in 32 bit chunks - * - * ip_build_xmit must ensure that when fragmenting only the last - * call to this function will be unaligned also. - */ -int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, - int offset, unsigned int len, __wsum *csump) -{ - __wsum csum = *csump; - int partial_cnt = 0, err = 0; - - /* Skip over the finished iovecs */ - while (offset >= iov->iov_len) { - offset -= iov->iov_len; - iov++; - } - - while (len > 0) { - u8 __user *base = iov->iov_base + offset; - int copy = min_t(unsigned int, len, iov->iov_len - offset); - - offset = 0; - - /* There is a remnant from previous iov. */ - if (partial_cnt) { - int par_len = 4 - partial_cnt; - - /* iov component is too short ... 
*/ - if (par_len > copy) { - if (copy_from_user(kdata, base, copy)) - goto out_fault; - kdata += copy; - base += copy; - partial_cnt += copy; - len -= copy; - iov++; - if (len) - continue; - *csump = csum_partial(kdata - partial_cnt, - partial_cnt, csum); - goto out; - } - if (copy_from_user(kdata, base, par_len)) - goto out_fault; - csum = csum_partial(kdata - partial_cnt, 4, csum); - kdata += par_len; - base += par_len; - copy -= par_len; - len -= par_len; - partial_cnt = 0; - } - - if (len > copy) { - partial_cnt = copy % 4; - if (partial_cnt) { - copy -= partial_cnt; - if (copy_from_user(kdata + copy, base + copy, - partial_cnt)) - goto out_fault; - } - } - - if (copy) { - csum = csum_and_copy_from_user(base, kdata, copy, - csum, &err); - if (err) - goto out; - } - len -= copy + partial_cnt; - kdata += copy + partial_cnt; - iov++; - } - *csump = csum; -out: - return err; - -out_fault: - err = -EFAULT; - goto out; -} -EXPORT_SYMBOL(csum_partial_copy_fromiovecend); - -unsigned long iov_pages(const struct iovec *iov, int offset, - unsigned long nr_segs) -{ - unsigned long seg, base; - int pages = 0, len, size; - - while (nr_segs && (offset >= iov->iov_len)) { - offset -= iov->iov_len; - ++iov; - --nr_segs; - } - - for (seg = 0; seg < nr_segs; seg++) { - base = (unsigned long)iov[seg].iov_base + offset; - len = iov[seg].iov_len - offset; - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; - pages += size; - offset = 0; - } - - return pages; -} -EXPORT_SYMBOL(iov_pages); -- cgit v1.2.3 From aad9a1cec7dcd1d45809b64643fce37061b17788 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 10 Dec 2014 14:49:01 -0500 Subject: vhost: switch vhost get_indirect() to iov_iter, kill memcpy_fromiovec() Cc: Michael S. Tsirkin Cc: kvm@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Signed-off-by: Al Viro --- drivers/vhost/vhost.c | 6 ++++-- include/linux/uio.h | 1 - lib/iovec.c | 25 ------------------------- 3 files changed, 4 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index cb807d0ea498..2ee28266fd07 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1125,6 +1125,7 @@ static int get_indirect(struct vhost_virtqueue *vq, struct vring_desc desc; unsigned int i = 0, count, found = 0; u32 len = vhost32_to_cpu(vq, indirect->len); + struct iov_iter from; int ret; /* Sanity check */ @@ -1142,6 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq, vq_err(vq, "Translation failure %d in indirect.\n", ret); return ret; } + iov_iter_init(&from, READ, vq->indirect, ret, len); /* We will use the result as an address to read from, so most * architectures only need a compiler barrier here. 
*/ @@ -1164,8 +1166,8 @@ static int get_indirect(struct vhost_virtqueue *vq, i, count); return -EINVAL; } - if (unlikely(memcpy_fromiovec((unsigned char *)&desc, - vq->indirect, sizeof desc))) { + if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) != + sizeof(desc))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; diff --git a/include/linux/uio.h b/include/linux/uio.h index 1c5e453f7ea9..af3439f4ebf2 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -135,7 +135,6 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); -int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/lib/iovec.c b/lib/iovec.c index 2d99cb4a5006..4a90875c64ae 100644 --- a/lib/iovec.c +++ b/lib/iovec.c @@ -2,31 +2,6 @@ #include #include -/* - * Copy iovec to kernel. Returns -EFAULT on error. - * - * Note: this modifies the original iovec. - */ - -int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, len, iov->iov_len); - if (copy_from_user(kdata, iov->iov_base, copy)) - return -EFAULT; - len -= copy; - kdata += copy; - iov->iov_base += copy; - iov->iov_len -= copy; - } - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_fromiovec); - /* * Copy kernel to iovec. Returns -EFAULT on error. */ -- cgit v1.2.3 From ba7438aed924133df54a60e4cd5499d359bcf2a8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 10 Dec 2014 15:51:28 -0500 Subject: vhost: don't bother copying iovecs in handle_rx(), kill memcpy_toiovecend() Cc: Michael S. Tsirkin Cc: kvm@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Signed-off-by: Al Viro --- drivers/vhost/net.c | 82 +++++++++++++++-------------------------------------- include/linux/uio.h | 3 -- lib/iovec.c | 26 ----------------- 3 files changed, 23 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index d86cc9bb9ea4..e022cc40303d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref { struct vhost_net_virtqueue { struct vhost_virtqueue vq; - /* hdr is used to store the virtio header. - * Since each iovec has >= 1 byte length, we never need more than - * header length entries to store the header. */ - struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)]; size_t vhost_hlen; size_t sock_hlen; /* vhost zerocopy support fields below: */ @@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock) sock_flag(sock->sk, SOCK_ZEROCOPY); } -/* Pop first len bytes from iovec. Return number of segments used. */ -static int move_iovec_hdr(struct iovec *from, struct iovec *to, - size_t len, int iov_count) -{ - int seg = 0; - size_t size; - - while (len && seg < iov_count) { - size = min(from->iov_len, len); - to->iov_base = from->iov_base; - to->iov_len = size; - from->iov_len -= size; - from->iov_base += size; - len -= size; - ++from; - ++to; - ++seg; - } - return seg; -} -/* Copy iovec entries for len bytes from iovec. 
*/ -static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, - size_t len, int iovcount) -{ - int seg = 0; - size_t size; - - while (len && seg < iovcount) { - size = min(from->iov_len, len); - to->iov_base = from->iov_base; - to->iov_len = size; - len -= size; - ++from; - ++to; - ++seg; - } -} - /* In case of DMA done not in order in lower device driver for some reason. * upend_idx is used to track end of used idx, done_idx is used to track head * of used idx. Once lower device DMA done contiguously, we will signal KVM @@ -570,9 +528,9 @@ static void handle_rx(struct vhost_net *net) .msg_controllen = 0, .msg_flags = MSG_DONTWAIT, }; - struct virtio_net_hdr_mrg_rxbuf hdr = { - .hdr.flags = 0, - .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE + struct virtio_net_hdr hdr = { + .flags = 0, + .gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; int err, mergeable; @@ -580,6 +538,7 @@ static void handle_rx(struct vhost_net *net) size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; struct socket *sock; + struct iov_iter fixup; mutex_lock(&vq->mutex); sock = vq->private_data; @@ -624,14 +583,19 @@ static void handle_rx(struct vhost_net *net) break; } /* We don't need to be notified again. */ - if (unlikely((vhost_hlen))) - /* Skip header. TODO: support TSO. */ - move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in); - else - /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF: - * needed because recvmsg can modify msg_iov. */ - copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in); - iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len); + iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); + fixup = msg.msg_iter; + if (unlikely((vhost_hlen))) { + /* We will supply the header ourselves + * TODO: support TSO. + */ + iov_iter_advance(&msg.msg_iter, vhost_hlen); + } else { + /* It'll come from socket; we'll need to patch + * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF + */ + iov_iter_advance(&fixup, sizeof(hdr)); + } err = sock->ops->recvmsg(NULL, sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: @@ -643,18 +607,18 @@ static void handle_rx(struct vhost_net *net) vhost_discard_vq_desc(vq, headcount); continue; } + /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ if (unlikely(vhost_hlen) && - memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0, - vhost_hlen)) { + copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { vq_err(vq, "Unable to write vnet_hdr at addr %p\n", vq->iov->iov_base); break; } - /* TODO: Should check and handle checksum. */ + /* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF + * TODO: Should check and handle checksum. 
+ */ if (likely(mergeable) && - memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, - offsetof(typeof(hdr), num_buffers), - sizeof hdr.num_buffers)) { + copy_to_iter(&headcount, 2, &fixup) != 2) { vq_err(vq, "Failed num_buffers write"); vhost_discard_vq_desc(vq, headcount); break; diff --git a/include/linux/uio.h b/include/linux/uio.h index af3439f4ebf2..02bd8a92038a 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -137,7 +137,4 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct io int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); -int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, - int offset, int len); - #endif diff --git a/lib/iovec.c b/lib/iovec.c index 4a90875c64ae..d8f17a9b1ccf 100644 --- a/lib/iovec.c +++ b/lib/iovec.c @@ -2,32 +2,6 @@ #include #include -/* - * Copy kernel to iovec. Returns -EFAULT on error. - */ - -int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, - int offset, int len) -{ - int copy; - for (; len > 0; ++iov) { - /* Skip over the finished iovecs */ - if (unlikely(offset >= iov->iov_len)) { - offset -= iov->iov_len; - continue; - } - copy = min_t(unsigned int, iov->iov_len - offset, len); - if (copy_to_user(iov->iov_base + offset, kdata, copy)) - return -EFAULT; - offset = 0; - kdata += copy; - len -= copy; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_toiovecend); - /* * Copy iovec to kernel. Returns -EFAULT on error. */ -- cgit v1.2.3 From 57dd8a0735aabff4862025cf64ad94da3d80e620 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 10 Dec 2014 16:03:43 -0500 Subject: vhost: vhost_scsi_handle_vq() should just use copy_from_user() it has just verified that it asks no more than the length of the first segment of iovec. And with that the last user of stuff in lib/iovec.c is gone. RIP. Cc: Michael S. Tsirkin Cc: Nicholas A. 
Bellinger Cc: kvm@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Signed-off-by: Al Viro --- drivers/vhost/scsi.c | 2 +- include/linux/uio.h | 2 -- lib/Makefile | 2 +- lib/iovec.c | 36 ------------------------------------ 4 files changed, 2 insertions(+), 40 deletions(-) delete mode 100644 lib/iovec.c (limited to 'include/linux') diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index d695b1673ae5..dc78d87e0fc2 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1079,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) req_size, vq->iov[0].iov_len); break; } - ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); + ret = copy_from_user(req, vq->iov[0].iov_base, req_size); if (unlikely(ret)) { vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); break; diff --git a/include/linux/uio.h b/include/linux/uio.h index 02bd8a92038a..3e0cb4ea3905 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -135,6 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); -int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, - int offset, int len); #endif diff --git a/lib/Makefile b/lib/Makefile index 3c3b30b9e020..1071d06398c3 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -24,7 +24,7 @@ obj-y += lockref.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o diff --git a/lib/iovec.c b/lib/iovec.c deleted file mode 100644 index d8f17a9b1ccf..000000000000 --- a/lib/iovec.c +++ /dev/null @@ -1,36 +0,0 @@ -#include -#include -#include - -/* - * Copy iovec to kernel. Returns -EFAULT on error. - */ - -int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, - int offset, int len) -{ - /* No data? Done! */ - if (len == 0) - return 0; - - /* Skip over the finished iovecs */ - while (offset >= iov->iov_len) { - offset -= iov->iov_len; - iov++; - } - - while (len > 0) { - u8 __user *base = iov->iov_base + offset; - int copy = min_t(unsigned int, len, iov->iov_len - offset); - - offset = 0; - if (copy_from_user(kdata, base, copy)) - return -EFAULT; - len -= copy; - kdata += copy; - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_fromiovecend); -- cgit v1.2.3 From 2bd82484bb4c5db1d5dc983ac7c409b2782e0154 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 3 Feb 2015 23:48:24 -0800 Subject: xps: fix xps for stacked devices A typical qdisc setup is the following : bond0 : bonding device, using HTB hierarchy eth1/eth2 : slaves, multiqueue NIC, using MQ + FQ qdisc XPS allows to spread packets on specific tx queues, based on the cpu doing the send. Problem is that dequeues from bond0 qdisc can happen on random cpus, due to the fact that qdisc_run() can dequeue a batch of packets. 
CPUA -> queue packet P1 on bond0 qdisc, P1->ooo_okay=1 CPUA -> queue packet P2 on bond0 qdisc, P2->ooo_okay=0 CPUB -> dequeue packet P1 from bond0 enqueue packet on eth1/eth2 CPUC -> dequeue packet P2 from bond0 enqueue packet on eth1/eth2 using sk cache (ooo_okay is 0) get_xps_queue() then might select wrong queue for P1, since current cpu might be different than CPUA. P2 might be sent on the old queue (stored in sk->sk_tx_queue_mapping), if CPUC runs a bit faster (or CPUB spins a bit on qdisc lock) Effect of this bug is TCP reorders, and more generally not optimal TX queue placement. (A victim bulk flow can be migrated to the wrong TX queue for a while) To fix this, we have to record sender cpu number the first time dev_queue_xmit() is called for one tx skb. We can union napi_id (used on receive path) and sender_cpu, granted we clear sender_cpu in skb_scrub_packet() (credit to Willem for this union idea) Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Cc: Nandita Dukkipati Cc: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/skbuff.h | 7 +++++-- net/core/flow_dissector.c | 7 ++++++- net/core/skbuff.c | 4 ++++ 3 files changed, 15 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 85ab7d72b54c..2748ff639144 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -626,8 +626,11 @@ struct sk_buff { __u32 hash; __be16 vlan_proto; __u16 vlan_tci; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int napi_id; +#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS) + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index beb83d1ac1c6..2c35c02a931e 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -422,7 +422,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) dev_maps = rcu_dereference(dev->xps_maps); if (dev_maps) { map = rcu_dereference( - dev_maps->cpu_map[raw_smp_processor_id()]); + dev_maps->cpu_map[skb->sender_cpu - 1]); if (map) { if (map->len == 1) queue_index = map->queues[0]; @@ -468,6 +468,11 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, { int queue_index = 0; +#ifdef CONFIG_XPS + if (skb->sender_cpu == 0) + skb->sender_cpu = raw_smp_processor_id() + 1; +#endif + if (dev->real_num_tx_queues != 1) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_select_queue) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a5bff2767f15..88c613eab142 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -825,6 +825,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #ifdef CONFIG_NET_RX_BUSY_POLL CHECK_SKB_FIELD(napi_id); #endif +#ifdef CONFIG_XPS + CHECK_SKB_FIELD(sender_cpu); +#endif #ifdef CONFIG_NET_SCHED CHECK_SKB_FIELD(tc_index); #ifdef CONFIG_NET_CLS_ACT @@ -4169,6 +4172,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->ignore_df = 0; skb_dst_drop(skb); skb->mark = 0; + skb->sender_cpu = 0; skb_init_secmark(skb); secpath_reset(skb); nf_reset(skb); -- cgit v1.2.3 From dcdc8994697faa789669c3fdaca1a8bc27a8f356 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 2 Feb 2015 16:07:34 -0800 Subject: net: add skb functions to process remote checksum offload This patch adds skb_remcsum_process and skb_gro_remcsum_process to perform the appropriate adjustments to the skb when receiving remote checksum offload. 
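Conceptually, both helpers compute the one's-complement delta that the remote checksum offload introduced and fold it back into skb->csum, so a CHECKSUM_COMPLETE value stays consistent with the modified packet. A minimal caller-side sketch of the non-GRO path, distilled from the hunks below (demo_rx_remcsum and its arguments are invented for illustration):

/* Hypothetical tunnel receive path: hdr points at the encapsulation
 * header in the linear skb data; start/offset come from the remote
 * checksum offload option carried in that header.
 */
static void demo_rx_remcsum(struct sk_buff *skb, void *hdr,
                            size_t hdrlen, int start, int offset)
{
        if (skb->remcsum_offload)
                return;         /* already processed in the GRO path */

        /* Forces CHECKSUM_COMPLETE if needed, applies the offloaded
         * checksum at offset and folds the delta into skb->csum.
         */
        skb_remcsum_process(skb, hdr + hdrlen, start, offset);
}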
Updated vxlan and gue to use these functions. Tested: Ran TCP_RR and TCP_STREAM netperf for VXLAN and GUE, did not see any change in performance. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 18 ++---------------- include/linux/netdevice.h | 15 +++++++++++++++ include/linux/skbuff.h | 21 +++++++++++++++++++++ net/ipv4/fou.c | 18 ++---------------- 4 files changed, 40 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 31bac2a21ce3..c184717e8b28 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -558,7 +558,6 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, u32 data) { size_t start, offset, plen; - __wsum delta; if (skb->remcsum_offload) return vh; @@ -580,12 +579,7 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return NULL; } - delta = remcsum_adjust((void *)vh + hdrlen, - NAPI_GRO_CB(skb)->csum, start, offset); - - /* Adjust skb->csum since we changed the packet */ - skb->csum = csum_add(skb->csum, delta); - NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); + skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset); skb->remcsum_offload = 1; @@ -1159,7 +1153,6 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, size_t hdrlen, u32 data) { size_t start, offset, plen; - __wsum delta; if (skb->remcsum_offload) { /* Already processed in GRO path */ @@ -1179,14 +1172,7 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, vh = (struct vxlanhdr *)(udp_hdr(skb) + 1); - if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) - __skb_checksum_complete(skb); - - delta = remcsum_adjust((void *)vh + hdrlen, - skb->csum, start, offset); - - /* Adjust skb->csum since we changed the packet */ - skb->csum = csum_add(skb->csum, delta); + skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset); return vh; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 16251e96e6aa..1347ac50d2af 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2318,6 +2318,21 @@ do { \ compute_pseudo(skb, proto)); \ } while (0) +static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, + int start, int offset) +{ + __wsum delta; + + BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); + + delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); + NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); +} + + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2748ff639144..5405dfe02572 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3099,6 +3099,27 @@ do { \ compute_pseudo(skb, proto)); \ } while (0) +/* Update skbuf and packet to reflect the remote checksum offload operation. + * When called, ptr indicates the starting point for skb->csum when + * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete + * here, skb_postpull_rcsum is done so skb->csum start is ptr. 
+ */ +static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, + int start, int offset) +{ + __wsum delta; + + if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { + __skb_checksum_complete(skb); + skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); + } + + delta = remcsum_adjust(ptr, skb->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); +} + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 3bc0cf07661c..92ddea1e6457 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -70,7 +70,6 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, size_t start = ntohs(pd[0]); size_t offset = ntohs(pd[1]); size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); - __wsum delta; if (skb->remcsum_offload) { /* Already processed in GRO path */ @@ -82,14 +81,7 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, return NULL; guehdr = (struct guehdr *)&udp_hdr(skb)[1]; - if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) - __skb_checksum_complete(skb); - - delta = remcsum_adjust((void *)guehdr + hdrlen, - skb->csum, start, offset); - - /* Adjust skb->csum since we changed the packet */ - skb->csum = csum_add(skb->csum, delta); + skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset); return guehdr; } @@ -228,7 +220,6 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, size_t start = ntohs(pd[0]); size_t offset = ntohs(pd[1]); size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); - __wsum delta; if (skb->remcsum_offload) return guehdr; @@ -243,12 +234,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, return NULL; } - delta = remcsum_adjust((void *)guehdr + hdrlen, - NAPI_GRO_CB(skb)->csum, start, offset); - - /* Adjust skb->csum since we changed the packet */ - skb->csum = csum_add(skb->csum, delta); - NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); + skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset); skb->remcsum_offload = 1; -- cgit v1.2.3 From 61bd3857ff2c7daf756d49b41e6277bbdaa8f789 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Tue, 3 Feb 2015 16:48:29 +0200 Subject: net/core: Add event for a change in slave state Add event which provides an indication on a change in the state of a bonding slave. The event handler should cast the pointer to the appropriate type (struct netdev_bonding_info) in order to get the full info about the slave. Signed-off-by: Moni Shoua Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 15 +++++++++++++++ net/core/dev.c | 20 ++++++++++++++++++++ net/core/rtnetlink.c | 1 + 3 files changed, 36 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1347ac50d2af..ce784d5018e0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -51,6 +51,7 @@ #include #include #include +#include struct netpoll_info; struct device; @@ -2056,6 +2057,7 @@ struct pcpu_sw_netstats { #define NETDEV_RESEND_IGMP 0x0016 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ #define NETDEV_CHANGEINFODATA 0x0018 +#define NETDEV_BONDING_INFO 0x0019 int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -3494,6 +3496,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features); +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; /* must be first */ + struct netdev_bonding_info bonding_info; +}; + +void netdev_bonding_info_change(struct net_device *dev, + struct netdev_bonding_info *bonding_info); + static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) { diff --git a/net/core/dev.c b/net/core/dev.c index 1d564d68e31a..ede0b161b115 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5355,6 +5355,26 @@ void netdev_upper_dev_unlink(struct net_device *dev, } EXPORT_SYMBOL(netdev_upper_dev_unlink); +/** + * netdev_bonding_info_change - Dispatch event about slave change + * @dev: device + * @netdev_bonding_info: info to dispatch + * + * Send NETDEV_BONDING_INFO to netdev notifiers with info. + * The caller must hold the RTNL lock. + */ +void netdev_bonding_info_change(struct net_device *dev, + struct netdev_bonding_info *bonding_info) +{ + struct netdev_notifier_bonding_info info; + + memcpy(&info.bonding_info, bonding_info, + sizeof(struct netdev_bonding_info)); + call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev, + &info.info); +} +EXPORT_SYMBOL(netdev_bonding_info_change); + void netdev_adjacent_add_links(struct net_device *dev) { struct netdev_adjacent *iter; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 673cb4c6f391..4cd5e350d129 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3180,6 +3180,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi case NETDEV_UNREGISTER_FINAL: case NETDEV_RELEASE: case NETDEV_JOIN: + case NETDEV_BONDING_INFO: break; default: rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); -- cgit v1.2.3 From 59e14e325066be49b49b6c2503337c69a9ee29fc Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Tue, 3 Feb 2015 16:48:32 +0200 Subject: net/mlx4_core: Port aggregation low level interface Implement the hardware interface required for port aggregation. 1. Disable RX port check on receive - don't perform a validity check that matches to QP's port and the port where the packet is received. 2. Virtual to physical port remap - configure virtual to physical port mapping. Port remap capability for virtual functions. Signed-off-by: Moni Shoua Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9 +++++ drivers/net/ethernet/mellanox/mlx4/fw.c | 56 +++++++++++++++++++++++++++++--- include/linux/mlx4/cmd.h | 7 ++++ include/linux/mlx4/device.h | 10 +++++- include/linux/mlx4/qp.h | 1 + 5 files changed, 77 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 154effbfd8be..a681d7c0bb9f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1583,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_CMD_EPERM_wrapper }, + { + .opcode = MLX4_CMD_VIRT_PORT_MAP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index dbabfae3a3de..4b08a393ebcb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -142,7 +142,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [17] = "Asymmetric EQs support", [18] = "More than 80 VFs support", [19] = "Performance optimized for limited rule configuration flow steering support", - [20] = "Recoverable error events support" + [20] = "Recoverable error events support", + [21] = "Port Remap support" }; int i; @@ -863,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); if (field & 0x20) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; @@ -1120,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); - /* For guests, disable mw type 2 */ + /* For guests, disable mw type 2 and port remap*/ MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; + bmme_flags &= ~MLX4_FLAG_PORT_REMAP; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); /* turn off device-managed steering capability if not enabled */ @@ -2100,13 +2104,16 @@ struct mlx4_config_dev { __be32 rsvd1[3]; __be16 vxlan_udp_dport; __be16 rsvd2; - __be32 rsvd3[27]; - __be16 rsvd4; - u8 rsvd5; + __be32 rsvd3; + __be32 roce_flags; + __be32 rsvd4[25]; + __be16 rsvd5; + u8 rsvd6; u8 rx_checksum_val; }; #define MLX4_VXLAN_UDP_DPORT (1 << 0) +#define MLX4_DISABLE_RX_PORT BIT(18) static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) { @@ -2209,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) } EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); +#define CONFIG_DISABLE_RX_PORT BIT(15) +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); + if (dis) + config_dev.roce_flags = + cpu_to_be32(CONFIG_DISABLE_RX_PORT); + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} + +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) +{ + struct 
mlx4_cmd_mailbox *mailbox; + struct { + __be32 v_port1; + __be32 v_port2; + } *v2p; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + + v2p = mailbox->buf; + v2p->v_port1 = cpu_to_be32(port1); + v2p->v_port2 = cpu_to_be32(port2); + + err = mlx4_cmd(dev, mailbox->dma, 0, + MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index ae95adc78509..7b6d4e9ff603 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -71,6 +71,7 @@ enum { /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, + MLX4_CMD_VIRT_PORT_MAP = 0x5c, MLX4_CMD_GET_OP_REQ = 0x59, /* TPT commands */ @@ -170,6 +171,12 @@ enum { MLX4_CMD_TIME_CLASS_C = 60000, }; +enum { + /* virtual to physical port mapping opcode modifiers */ + MLX4_GET_PORT_VIRT2PHY = 0x0, + MLX4_SET_PORT_VIRT2PHY = 0x1, +}; + enum { MLX4_MAILBOX_SIZE = 4096, MLX4_ACCESS_MEM_ALIGN = 256, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index c95d659a39f2..d9afd99dde39 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -201,7 +201,8 @@ enum { MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, - MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20 + MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, + MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21 }; enum { @@ -253,9 +254,14 @@ enum { MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, + MLX4_BMME_FLAG_PORT_REMAP = 1 << 24, MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, }; +enum { + MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP +}; + enum mlx4_event { MLX4_EVENT_TYPE_COMP = 0x00, MLX4_EVENT_TYPE_PATH_MIG = 0x01, @@ -1378,6 +1384,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port); int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis); +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2); int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 467ccdf94c98..2bbc62aa818a 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -96,6 +96,7 @@ enum { MLX4_QP_BIT_RRE = 1 << 15, MLX4_QP_BIT_RWE = 1 << 14, MLX4_QP_BIT_RAE = 1 << 13, + MLX4_QP_BIT_FPP = 1 << 3, MLX4_QP_BIT_RIC = 1 << 4, }; -- cgit v1.2.3 From 53f33ae295a5098f12218da1400f55ad7df7447c Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Tue, 3 Feb 2015 16:48:33 +0200 Subject: net/mlx4_core: Port aggregation upper layer interface Supply interface functions to bond and unbond ports of a mlx4 internal interfaces. Example for such an interface is the one registered by the mlx4 IB driver under RoCE. There are 1. Functions to go in/out to/from bonded mode 2. Function to remap virtual ports to physical ports The bond_mutex prevents simultaneous access to data that keep status of the device in bonded mode. 
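A consumer-side sketch of this interface (the demo_* names, stub bodies and the chosen port mapping are invented for illustration; the opt-in flag the sketch sets is described next):

/* Hypothetical ULP opting in to port aggregation: its add/remove
 * callbacks are re-invoked whenever the device moves in or out of
 * bonded mode.
 */
static void *demo_add(struct mlx4_dev *dev)
{
        return dev;             /* stand-in per-device context */
}

static void demo_remove(struct mlx4_dev *dev, void *context)
{
}

static struct mlx4_interface demo_intf = {
        .add      = demo_add,
        .remove   = demo_remove,
        .protocol = MLX4_PROT_IB_IPV6,
        .flags    = MLX4_INTFF_BONDING,
};

static int demo_enter_bonded_mode(struct mlx4_dev *dev)
{
        /* remap both virtual ports to physical port 1 (assumed active) */
        struct mlx4_port_map v2p = { .port1 = 1, .port2 = 1 };
        int err;

        err = mlx4_bond(dev);           /* serialized by bond_mutex */
        if (err)
                return err;
        return mlx4_port_map_set(dev, &v2p);
}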
The upper mlx4 interface marks to the mlx4 core module that they want to be subject for such bonding by setting the MLX4_INTFF_BONDING flag. Interface which goes to/from bonded mode is re-created. The mlx4 Ethernet driver does not set this flag when registering the interface, the IB driver does. Signed-off-by: Moni Shoua Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 8 +- drivers/net/ethernet/mellanox/mlx4/intf.c | 54 +++++++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 89 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 + drivers/net/ethernet/mellanox/mlx4/qp.c | 2 + .../net/ethernet/mellanox/mlx4/resource_tracker.c | 3 + include/linux/mlx4/device.h | 1 + include/linux/mlx4/driver.h | 19 +++++ 8 files changed, 177 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index f1a5500ff72d..34f2fdf4fe5d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->mtu_msgmax = 0xff; if (!is_tx && !rss) context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); - if (is_tx) + if (is_tx) { context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); - else + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP) + context->params2 |= MLX4_QP_BIT_FPP; + + } else { context->sq_size_stride = ilog2(TXBB_SIZE) - 4; + } context->usr_page = cpu_to_be32(mdev->priv_uar.index); context->local_qpn = cpu_to_be32(qpn); context->pri_path.ackto = 1 & 0x07; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 68d2bad325d5..6fce58718837 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -33,11 +33,13 @@ #include #include +#include #include "mlx4.h" struct mlx4_device_context { struct list_head list; + struct list_head bond_list; struct mlx4_interface *intf; void *context; }; @@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf) } EXPORT_SYMBOL_GPL(mlx4_unregister_interface); +int mlx4_do_bond(struct mlx4_dev *dev, bool enable) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx; + unsigned long flags; + int ret; + LIST_HEAD(bond_list); + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + ret = mlx4_disable_rx_port_check(dev, enable); + if (ret) { + mlx4_err(dev, "Fail to %s rx port check\n", + enable ? 
"enable" : "disable"); + return ret; + } + if (enable) { + dev->flags |= MLX4_FLAG_BONDED; + } else { + ret = mlx4_virt2phy_port_map(dev, 1, 2); + if (ret) { + mlx4_err(dev, "Fail to reset port map\n"); + return ret; + } + dev->flags &= ~MLX4_FLAG_BONDED; + } + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) { + if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) { + list_add_tail(&dev_ctx->bond_list, &bond_list); + list_del(&dev_ctx->list); + } + } + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &bond_list, bond_list) { + dev_ctx->intf->remove(dev, dev_ctx->context); + dev_ctx->context = dev_ctx->intf->add(dev); + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", + dev_ctx->intf->protocol, enable ? + "enabled" : "disabled"); + } + return 0; +} + void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, unsigned long param) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index cc9f48439244..f3245fe0f442 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1160,6 +1160,91 @@ err_set_port: return err ? err : count; } +int mlx4_bond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (!mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, true); + else + ret = 0; + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to bond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is bonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_bond); + +int mlx4_unbond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, false); + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to unbond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is unbonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_unbond); + + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) +{ + u8 port1 = v2p->port1; + u8 port2 = v2p->port2; + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + mutex_lock(&priv->bond_mutex); + + /* zero means keep current mapping for this port */ + if (port1 == 0) + port1 = priv->v2p.port1; + if (port2 == 0) + port2 = priv->v2p.port2; + + if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || + (port2 < 1) || (port2 > MLX4_MAX_PORTS) || + (port1 == 2 && port2 == 1)) { + /* besides boundary checks cross mapping makes + * no sense and therefore not allowed */ + err = -EINVAL; + } else if ((port1 == priv->v2p.port1) && + (port2 == priv->v2p.port2)) { + err = 0; + } else { + err = mlx4_virt2phy_port_map(dev, port1, port2); + if (!err) { + mlx4_dbg(dev, "port map changed: [%d][%d]\n", + port1, port2); + priv->v2p.port1 = port1; + priv->v2p.port2 = port2; + } else { + mlx4_err(dev, "Failed to change port mape: %d\n", err); + } + } + + mutex_unlock(&priv->bond_mutex); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_port_map_set); + static int mlx4_load_fw(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2638,6 +2723,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 
spin_lock_init(&priv->ctx_lock); mutex_init(&priv->port_mutex); + mutex_init(&priv->bond_mutex); INIT_LIST_HEAD(&priv->pgdir_list); mutex_init(&priv->pgdir_mutex); @@ -2934,6 +3020,9 @@ slave_start: goto err_port; } + priv->v2p.port1 = 1; + priv->v2p.port2 = 2; + err = mlx4_register_device(dev); if (err) goto err_port; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 148dc0945aab..803f17653da7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -885,6 +885,8 @@ struct mlx4_priv { int reserved_mtts; int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; + struct mlx4_port_map v2p; /* cached port mapping configuration */ + struct mutex bond_mutex; /* for bond mode */ __be64 slave_node_guids[MLX4_MFUNC_MAX]; atomic_t opreq_count; @@ -1364,6 +1366,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); /* Returns the VF index of slave */ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); int mlx4_config_mad_demux(struct mlx4_dev *dev); +int mlx4_do_bond(struct mlx4_dev *dev, bool enable); enum mlx4_zone_flags { MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 1586ecce13c7..2bb8553bd905 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { context->flags &= cpu_to_be32(~(0xf << 28)); context->flags |= cpu_to_be32(states[i + 1] << 28); + if (states[i + 1] != MLX4_QP_STATE_RTR) + context->params2 &= ~MLX4_QP_BIT_FPP; err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], context, 0, 0, qp); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 79feeb6b0d87..c5f3dfca226b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2944,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev, qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; optpar = be32_to_cpu(*(__be32 *) inbox->buf); + if (slave != mlx4_master_func_num(dev)) + qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + switch (qp_type) { case MLX4_QP_ST_RC: case MLX4_QP_ST_XRC: diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index d9afd99dde39..977b0b164431 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -70,6 +70,7 @@ enum { MLX4_FLAG_SLAVE = 1 << 3, MLX4_FLAG_SRIOV = 1 << 4, MLX4_FLAG_OLD_REG_MAC = 1 << 6, + MLX4_FLAG_BONDED = 1 << 7 }; enum { diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 022055c8fb26..9553a73d2049 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -49,6 +49,10 @@ enum mlx4_dev_event { MLX4_DEV_EVENT_SLAVE_SHUTDOWN, }; +enum { + MLX4_INTFF_BONDING = 1 << 0 +}; + struct mlx4_interface { void * (*add) (struct mlx4_dev *dev); void (*remove)(struct mlx4_dev *dev, void *context); @@ -57,11 +61,26 @@ struct mlx4_interface { void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); struct list_head list; enum mlx4_protocol protocol; + int flags; }; int mlx4_register_interface(struct mlx4_interface *intf); void mlx4_unregister_interface(struct mlx4_interface *intf); +int mlx4_bond(struct mlx4_dev *dev); +int 
mlx4_unbond(struct mlx4_dev *dev); +static inline int mlx4_is_bonded(struct mlx4_dev *dev) +{ + return !!(dev->flags & MLX4_FLAG_BONDED); +} + +struct mlx4_port_map { + u8 port1; + u8 port2; +}; + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); + void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); static inline u64 mlx4_mac_to_u64(u8 *addr) -- cgit v1.2.3 From f2dba9c6ff0d9a515b4c3f1b037cd65c8b2a868c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 4 Feb 2015 07:33:23 +1100 Subject: rhashtable: Introduce rhashtable_walk_* Some existing rhashtable users get too intimate with it by walking the buckets directly. This prevents us from easily changing the internals of rhashtable. This patch adds the helpers rhashtable_walk_init/exit/start/next/stop which will replace these custom walkers. They are meant to be usable for both procfs seq_file walks as well as walking by a netlink dump. The iterator structure should fit inside a netlink dump cb structure, with at least one element to spare. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 35 ++++++++++ lib/rhashtable.c | 163 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 198 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index e0337844358e..58851275fed9 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -18,6 +18,7 @@ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H +#include #include #include #include @@ -111,6 +112,7 @@ struct rhashtable_params { * @p: Configuration parameters * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping + * @walkers: List of active walkers * @being_destroyed: True if table is set up for destruction */ struct rhashtable { @@ -121,9 +123,36 @@ struct rhashtable { struct rhashtable_params p; struct work_struct run_work; struct mutex mutex; + struct list_head walkers; bool being_destroyed; }; +/** + * struct rhashtable_walker - Hash table walker + * @list: List entry on list of walkers + * @resize: Resize event occured + */ +struct rhashtable_walker { + struct list_head list; + bool resize; +}; + +/** + * struct rhashtable_iter - Hash table iterator, fits into netlink cb + * @ht: Table to iterate through + * @p: Current pointer + * @walker: Associated rhashtable walker + * @slot: Current slot + * @skip: Number of entries to skip in slot + */ +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhashtable_walker *walker; + unsigned int slot; + unsigned int skip; +}; + static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) { return NULLS_MARKER(ht->p.nulls_base + hash); @@ -179,6 +208,12 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht, bool (*compare)(void *, void *), void *arg); +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); +void rhashtable_walk_exit(struct rhashtable_iter *iter); +int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); +void *rhashtable_walk_next(struct rhashtable_iter *iter); +void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); + void rhashtable_destroy(struct rhashtable *ht); #define rht_dereference(p, ht) \ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 904b419b72f5..057919164e23 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -484,6 +484,7 @@ static void 
rht_deferred_worker(struct work_struct *work) { struct rhashtable *ht; struct bucket_table *tbl; + struct rhashtable_walker *walker; ht = container_of(work, struct rhashtable, run_work); mutex_lock(&ht->mutex); @@ -492,6 +493,9 @@ static void rht_deferred_worker(struct work_struct *work) tbl = rht_dereference(ht->tbl, ht); + list_for_each_entry(walker, &ht->walkers, list) + walker->resize = true; + if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) rhashtable_expand(ht); else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) @@ -822,6 +826,164 @@ exit: } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert); +/** + * rhashtable_walk_init - Initialise an iterator + * @ht: Table to walk over + * @iter: Hash table Iterator + * + * This function prepares a hash table walk. + * + * Note that if you restart a walk after rhashtable_walk_stop you + * may see the same object twice. Also, you may miss objects if + * there are removals in between rhashtable_walk_stop and the next + * call to rhashtable_walk_start. + * + * For a completely stable walk you should construct your own data + * structure outside the hash table. + * + * This function may sleep so you must not call it from interrupt + * context or with spin locks held. + * + * You must call rhashtable_walk_exit if this function returns + * successfully. + */ +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) +{ + iter->ht = ht; + iter->p = NULL; + iter->slot = 0; + iter->skip = 0; + + iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); + if (!iter->walker) + return -ENOMEM; + + mutex_lock(&ht->mutex); + list_add(&iter->walker->list, &ht->walkers); + mutex_unlock(&ht->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_init); + +/** + * rhashtable_walk_exit - Free an iterator + * @iter: Hash table Iterator + * + * This function frees resources allocated by rhashtable_walk_init. + */ +void rhashtable_walk_exit(struct rhashtable_iter *iter) +{ + mutex_lock(&iter->ht->mutex); + list_del(&iter->walker->list); + mutex_unlock(&iter->ht->mutex); + kfree(iter->walker); +} +EXPORT_SYMBOL_GPL(rhashtable_walk_exit); + +/** + * rhashtable_walk_start - Start a hash table walk + * @iter: Hash table iterator + * + * Start a hash table walk. Note that we take the RCU lock in all + * cases including when we return an error. So you must always call + * rhashtable_walk_stop to clean up. + * + * Returns zero if successful. + * + * Returns -EAGAIN if resize event occured. Note that the iterator + * will rewind back to the beginning and you may use it immediately + * by calling rhashtable_walk_next. + */ +int rhashtable_walk_start(struct rhashtable_iter *iter) +{ + rcu_read_lock(); + + if (iter->walker->resize) { + iter->slot = 0; + iter->skip = 0; + iter->walker->resize = false; + return -EAGAIN; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_start); + +/** + * rhashtable_walk_next - Return the next object and advance the iterator + * @iter: Hash table iterator + * + * Note that you must call rhashtable_walk_stop when you are finished + * with the walk. + * + * Returns the next object or NULL when the end of the table is reached. + * + * Returns -EAGAIN if resize event occured. Note that the iterator + * will rewind back to the beginning and you may continue to use it. 
+ */ +void *rhashtable_walk_next(struct rhashtable_iter *iter) +{ + const struct bucket_table *tbl; + struct rhashtable *ht = iter->ht; + struct rhash_head *p = iter->p; + void *obj = NULL; + + tbl = rht_dereference_rcu(ht->tbl, ht); + + if (p) { + p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); + goto next; + } + + for (; iter->slot < tbl->size; iter->slot++) { + int skip = iter->skip; + + rht_for_each_rcu(p, tbl, iter->slot) { + if (!skip) + break; + skip--; + } + +next: + if (!rht_is_a_nulls(p)) { + iter->skip++; + iter->p = p; + obj = rht_obj(ht, p); + goto out; + } + + iter->skip = 0; + } + + iter->p = NULL; + +out: + if (iter->walker->resize) { + iter->p = NULL; + iter->slot = 0; + iter->skip = 0; + iter->walker->resize = false; + return ERR_PTR(-EAGAIN); + } + + return obj; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_next); + +/** + * rhashtable_walk_stop - Finish a hash table walk + * @iter: Hash table iterator + * + * Finish a hash table walk. + */ +void rhashtable_walk_stop(struct rhashtable_iter *iter) +{ + rcu_read_unlock(); + iter->p = NULL; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_stop); + static size_t rounded_hashtable_size(struct rhashtable_params *params) { return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), @@ -894,6 +1056,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); memcpy(&ht->p, params, sizeof(*params)); + INIT_LIST_HEAD(&ht->walkers); if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); -- cgit v1.2.3 From a9b2c06dbef48ed31cff1764c5ce824829106f4f Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Fri, 6 Feb 2015 16:04:39 -0500 Subject: tcp: mitigate ACK loops for connections as tcp_request_sock In the SYN_RECV state, where the TCP connection is represented by tcp_request_sock, we now rate-limit SYNACKs in response to a client's retransmitted SYNs: we do not send a SYNACK in response to client SYN if it has been less than sysctl_tcp_invalid_ratelimit (default 500ms) since we last sent a SYNACK in response to a client's retransmitted SYN. This allows the vast majority of legitimate client connections to proceed unimpeded, even for the most aggressive platforms, iOS and MacOS, which actually retransmit SYNs 1-second intervals for several times in a row. They use SYN RTO timeouts following the progression: 1,1,1,1,1,2,4,8,16,32. Reported-by: Avery Fay Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 + include/net/tcp.h | 1 + net/ipv4/tcp_minisocks.c | 6 +++++- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 67309ece0772..bcc828d3b9b9 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -115,6 +115,7 @@ struct tcp_request_sock { u32 rcv_isn; u32 snt_isn; u32 snt_synack; /* synack sent time */ + u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# * after data-in-SYN. 
diff --git a/include/net/tcp.h b/include/net/tcp.h index b81f45c67b2e..da4196fb78db 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1145,6 +1145,7 @@ static inline void tcp_openreq_init(struct request_sock *req, tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tcp_rsk(req)->snt_synack = tcp_time_stamp; + tcp_rsk(req)->last_oow_ack_time = 0; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; ireq->tstamp_ok = rx_opt->tstamp_ok; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index bc9216dc9de1..131aa4950d1c 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -605,7 +605,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * Reset timer after retransmitting SYNACK, similar to * the idea of fast retransmit in recovery. */ - if (!inet_rtx_syn_ack(sk, req)) + if (!tcp_oow_rate_limited(sock_net(sk), skb, + LINUX_MIB_TCPACKSKIPPEDSYNRECV, + &tcp_rsk(req)->last_oow_ack_time) && + + !inet_rtx_syn_ack(sk, req)) req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX) + jiffies; return NULL; -- cgit v1.2.3 From f2b2c582e82429270d5818fbabe653f4359d7024 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Fri, 6 Feb 2015 16:04:40 -0500 Subject: tcp: mitigate ACK loops for connections as tcp_sock Ensure that in state ESTABLISHED, where the connection is represented by a tcp_sock, we rate limit dupacks in response to incoming packets (a) with TCP timestamps that fail PAWS checks, or (b) with sequence numbers or ACK numbers that are out of the acceptable window. We do not send a dupack in response to out-of-window packets if it has been less than sysctl_tcp_invalid_ratelimit (default 500ms) since we last sent a dupack in response to an out-of-window packet. There is already a similar (although global) rate-limiting mechanism for "challenge ACKs". When deciding whether to send a challence ACK, we first consult the new per-connection rate limit, and then the global rate limit. Reported-by: Avery Fay Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 1 + net/ipv4/tcp_input.c | 29 ++++++++++++++++++++++------- net/ipv4/tcp_minisocks.c | 1 + 3 files changed, 24 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index bcc828d3b9b9..66d85a80a1ec 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -153,6 +153,7 @@ struct tcp_sock { u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ + u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ u32 tsoffset; /* timestamp offset */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9401aa43b814..8fdd27b17306 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3322,13 +3322,22 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 } /* RFC 5961 7 [ACK Throttling] */ -static void tcp_send_challenge_ack(struct sock *sk) +static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) { /* unprotected vars, we dont care of overwrites */ static u32 challenge_timestamp; static unsigned int challenge_count; - u32 now = jiffies / HZ; + struct tcp_sock *tp = tcp_sk(sk); + u32 now; + + /* First check our per-socket dupack rate limit. */ + if (tcp_oow_rate_limited(sock_net(sk), skb, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE, + &tp->last_oow_ack_time)) + return; + /* Then check the check host-wide RFC 5961 rate limit. */ + now = jiffies / HZ; if (now != challenge_timestamp) { challenge_timestamp = now; challenge_count = 0; @@ -3424,7 +3433,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (before(ack, prior_snd_una)) { /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ if (before(ack, prior_snd_una - tp->max_window)) { - tcp_send_challenge_ack(sk); + tcp_send_challenge_ack(sk, skb); return -1; } goto old_ack; @@ -4993,7 +5002,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); - tcp_send_dupack(sk, skb); + if (!tcp_oow_rate_limited(sock_net(sk), skb, + LINUX_MIB_TCPACKSKIPPEDPAWS, + &tp->last_oow_ack_time)) + tcp_send_dupack(sk, skb); goto discard; } /* Reset is accepted even if it did not pass PAWS. 
*/ @@ -5010,7 +5022,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, if (!th->rst) { if (th->syn) goto syn_challenge; - tcp_send_dupack(sk, skb); + if (!tcp_oow_rate_limited(sock_net(sk), skb, + LINUX_MIB_TCPACKSKIPPEDSEQ, + &tp->last_oow_ack_time)) + tcp_send_dupack(sk, skb); } goto discard; } @@ -5026,7 +5041,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) tcp_reset(sk); else - tcp_send_challenge_ack(sk); + tcp_send_challenge_ack(sk, skb); goto discard; } @@ -5040,7 +5055,7 @@ syn_challenge: if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); - tcp_send_challenge_ack(sk); + tcp_send_challenge_ack(sk, skb); goto discard; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 131aa4950d1c..98a840561ec8 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -467,6 +467,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, tcp_enable_early_retrans(newtp); newtp->tlp_high_seq = 0; newtp->lsndtime = treq->snt_synack; + newtp->last_oow_ack_time = 0; newtp->total_retrans = req->num_retrans; /* So many TCP implementations out there (incorrectly) count the -- cgit v1.2.3 From 4fb17a6091674f469e8ac85dc770fbf9a9ba7cc8 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Fri, 6 Feb 2015 16:04:41 -0500 Subject: tcp: mitigate ACK loops for connections as tcp_timewait_sock Ensure that in state FIN_WAIT2 or TIME_WAIT, where the connection is represented by a tcp_timewait_sock, we rate limit dupacks in response to incoming packets (a) with TCP timestamps that fail PAWS checks, or (b) with sequence numbers that are out of the acceptable window. We do not send a dupack in response to out-of-window packets if it has been less than sysctl_tcp_invalid_ratelimit (default 500ms) since we last sent a dupack in response to an out-of-window packet. Reported-by: Avery Fay Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 4 ++++ net/ipv4/tcp_minisocks.c | 29 ++++++++++++++++++++++++----- 2 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 66d85a80a1ec..1a7adb411647 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -342,6 +342,10 @@ struct tcp_timewait_sock { u32 tw_rcv_wnd; u32 tw_ts_offset; u32 tw_ts_recent; + + /* The time we sent the last out-of-window ACK: */ + u32 tw_last_oow_ack_time; + long tw_ts_recent_stamp; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 98a840561ec8..dd11ac7798c6 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -58,6 +58,25 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) return seq == e_win && seq == end_seq; } +static enum tcp_tw_status +tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw, + const struct sk_buff *skb, int mib_idx) +{ + struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); + + if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx, + &tcptw->tw_last_oow_ack_time)) { + /* Send ACK. Note, we do not put the bucket, + * it will be released by caller. + */ + return TCP_TW_ACK; + } + + /* We are rate-limiting, so just release the tw sock and drop skb. 
*/ + inet_twsk_put(tw); + return TCP_TW_SUCCESS; +} + /* * * Main purpose of TIME-WAIT state is to close connection gracefully, * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN @@ -116,7 +135,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt, tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd)) - return TCP_TW_ACK; + return tcp_timewait_check_oow_rate_limit( + tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2); if (th->rst) goto kill; @@ -250,10 +270,8 @@ kill: inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN, TCP_TIMEWAIT_LEN); - /* Send ACK. Note, we do not put the bucket, - * it will be released by caller. - */ - return TCP_TW_ACK; + return tcp_timewait_check_oow_rate_limit( + tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); } inet_twsk_put(tw); return TCP_TW_SUCCESS; @@ -289,6 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tcptw->tw_ts_recent = tp->rx_opt.ts_recent; tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; tcptw->tw_ts_offset = tp->tsoffset; + tcptw->tw_last_oow_ack_time = 0; #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { -- cgit v1.2.3 From 096a4cfa5807aa89c78ce12309c0b1c10cf88184 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 6 Feb 2015 18:54:19 +0100 Subject: net: fix a typo in skb_checksum_validate_zero_check Remove trailing underscore. Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- include/linux/skbuff.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 111e665455c3..1bb36edb66b9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3072,7 +3072,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) #define skb_checksum_validate_zero_check(skb, proto, check, \ compute_pseudo) \ - __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo) + __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo) #define skb_checksum_simple_validate(skb) \ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) -- cgit v1.2.3 From 567e4b79731c352a17d73c483959f795d3593e03 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 6 Feb 2015 12:59:01 -0800 Subject: net: rfs: add hash collision detection Receive Flow Steering is a nice solution but suffers from hash collisions when a mix of connected and unconnected traffic is received on the host, when flow hash table is populated. Also, clearing flow in inet_release() makes RFS not very good for short lived flows, as many packets can follow close(). (FIN , ACK packets, ...) This patch extends the information stored into global hash table to not only include cpu number, but upper part of the hash value. I use a 32bit value, and dynamically split it in two parts. For host with less than 64 possible cpus, this gives 6 bits for the cpu number, and 26 (32-6) bits for the upper part of the hash. Since hash bucket selection use low order bits of the hash, we have a full hash match, if /proc/sys/net/core/rps_sock_flow_entries is big enough. If the hash found in flow table does not match, we fallback to RPS (if it is enabled for the rxqueue). This means that a packet for an non connected flow can avoid the IPI through a unrelated/victim CPU. This also means we no longer have to clear the table at socket close time, and this helps short lived flows performance. 
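A worked sketch of the new entry layout, assuming a host with 64 possible CPUs (so rps_cpu_mask == 0x3f and the upper 26 bits carry the flow hash); the demo_* helpers are invented but mirror the arithmetic in the hunks below:

#define DEMO_RPS_CPU_MASK 0x3fU         /* roundup_pow_of_two(64) - 1 */

/* recvmsg side: store "upper hash bits | cpu" for this flow */
static inline u32 demo_make_ent(u32 hash, u32 cpu)
{
        return (hash & ~DEMO_RPS_CPU_MASK) | (cpu & DEMO_RPS_CPU_MASK);
}

/* receive side: the stored entry counts as a match only if the
 * upper hash bits agree; otherwise fall back to plain RPS.
 */
static inline bool demo_ent_matches(u32 ident, u32 hash)
{
        return ((ident ^ hash) & ~DEMO_RPS_CPU_MASK) == 0;
}

/* e.g. hash 0x12345678 recorded on cpu 5 yields ident 0x12345645;
 * a colliding flow hashing to 0xabcdef01 fails the match and takes
 * the RPS path instead of redirecting to a victim CPU.
 */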
Signed-off-by: Eric Dumazet Acked-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/tun.c | 5 +---- include/linux/netdevice.h | 34 ++++++++++++++++---------------- include/net/sock.h | 24 +---------------------- net/core/dev.c | 48 ++++++++++++++++++++++++++-------------------- net/core/sysctl_net_core.c | 2 +- net/ipv4/af_inet.c | 2 -- 6 files changed, 47 insertions(+), 68 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ad7d3d5f3ee5..857dca47bf80 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -256,7 +256,6 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) { tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n", e->rxhash, e->queue_index); - sock_rps_reset_flow_hash(e->rps_rxhash); hlist_del_rcu(&e->hash_link); kfree_rcu(e, rcu); --tun->flow_count; @@ -373,10 +372,8 @@ unlock: */ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) { - if (unlikely(e->rps_rxhash != hash)) { - sock_rps_reset_flow_hash(e->rps_rxhash); + if (unlikely(e->rps_rxhash != hash)) e->rps_rxhash = hash; - } } /* We try to identify a flow through its rxhash first. The reason that diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ce784d5018e0..ab3b7cef4638 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -644,39 +644,39 @@ struct rps_dev_flow_table { /* * The rps_sock_flow_table contains mappings of flows to the last CPU * on which they were processed by the application (set in recvmsg). + * Each entry is a 32bit value. Upper part is the high order bits + * of flow hash, lower part is cpu number. + * rps_cpu_mask is used to partition the space, depending on number of + * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 + * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f, + * meaning we use 32-6=26 bits for the hash. 
*/ struct rps_sock_flow_table { - unsigned int mask; - u16 ents[0]; + u32 mask; + u32 ents[0]; }; -#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ - ((_num) * sizeof(u16))) +#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) #define RPS_NO_CPU 0xffff +extern u32 rps_cpu_mask; +extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; + static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, u32 hash) { if (table && hash) { - unsigned int cpu, index = hash & table->mask; + unsigned int index = hash & table->mask; + u32 val = hash & ~rps_cpu_mask; /* We only give a hint, preemption can change cpu under us */ - cpu = raw_smp_processor_id(); + val |= raw_smp_processor_id(); - if (table->ents[index] != cpu) - table->ents[index] = cpu; + if (table->ents[index] != val) + table->ents[index] = val; } } -static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, - u32 hash) -{ - if (table && hash) - table->ents[hash & table->mask] = RPS_NO_CPU; -} - -extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; - #ifdef CONFIG_RFS_ACCEL bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, u16 filter_id); diff --git a/include/net/sock.h b/include/net/sock.h index d28b8fededd6..e13824570b0f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -857,18 +857,6 @@ static inline void sock_rps_record_flow_hash(__u32 hash) #endif } -static inline void sock_rps_reset_flow_hash(__u32 hash) -{ -#ifdef CONFIG_RPS - struct rps_sock_flow_table *sock_flow_table; - - rcu_read_lock(); - sock_flow_table = rcu_dereference(rps_sock_flow_table); - rps_reset_sock_flow(sock_flow_table, hash); - rcu_read_unlock(); -#endif -} - static inline void sock_rps_record_flow(const struct sock *sk) { #ifdef CONFIG_RPS @@ -876,28 +864,18 @@ static inline void sock_rps_record_flow(const struct sock *sk) #endif } -static inline void sock_rps_reset_flow(const struct sock *sk) -{ -#ifdef CONFIG_RPS - sock_rps_reset_flow_hash(sk->sk_rxhash); -#endif -} - static inline void sock_rps_save_rxhash(struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_RPS - if (unlikely(sk->sk_rxhash != skb->hash)) { - sock_rps_reset_flow(sk); + if (unlikely(sk->sk_rxhash != skb->hash)) sk->sk_rxhash = skb->hash; - } #endif } static inline void sock_rps_reset_rxhash(struct sock *sk) { #ifdef CONFIG_RPS - sock_rps_reset_flow(sk); sk->sk_rxhash = 0; #endif } diff --git a/net/core/dev.c b/net/core/dev.c index a3a96ffc67f4..8be38675e1a8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3030,6 +3030,8 @@ static inline void ____napi_schedule(struct softnet_data *sd, /* One global table that all flow-based protocols share. 
diff --git a/net/core/dev.c b/net/core/dev.c
index a3a96ffc67f4..8be38675e1a8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3030,6 +3030,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 /* One global table that all flow-based protocols share. */
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
+u32 rps_cpu_mask __read_mostly;
+EXPORT_SYMBOL(rps_cpu_mask);
 
 struct static_key rps_needed __read_mostly;
 
@@ -3086,16 +3088,17 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
 {
-	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map;
+	const struct rps_sock_flow_table *sock_flow_table;
+	struct netdev_rx_queue *rxqueue = dev->_rx;
 	struct rps_dev_flow_table *flow_table;
-	struct rps_sock_flow_table *sock_flow_table;
+	struct rps_map *map;
 	int cpu = -1;
-	u16 tcpu;
+	u32 tcpu;
 	u32 hash;
 
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
+
 		if (unlikely(index >= dev->real_num_rx_queues)) {
 			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
@@ -3103,39 +3106,40 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
				  dev->name, index, dev->real_num_rx_queues);
 			goto done;
 		}
-		rxqueue = dev->_rx + index;
-	} else
-		rxqueue = dev->_rx;
+		rxqueue += index;
+	}
 
+	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
+
+	flow_table = rcu_dereference(rxqueue->rps_flow_table);
 	map = rcu_dereference(rxqueue->rps_map);
-	if (map) {
-		if (map->len == 1 &&
-		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
-			tcpu = map->cpus[0];
-			if (cpu_online(tcpu))
-				cpu = tcpu;
-			goto done;
-		}
-	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
+	if (!flow_table && !map)
 		goto done;
-	}
 
 	skb_reset_network_header(skb);
 	hash = skb_get_hash(skb);
 	if (!hash)
 		goto done;
 
-	flow_table = rcu_dereference(rxqueue->rps_flow_table);
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
 	if (flow_table && sock_flow_table) {
-		u16 next_cpu;
 		struct rps_dev_flow *rflow;
+		u32 next_cpu;
+		u32 ident;
 
+		/* First check into global flow table if there is a match */
+		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+		if ((ident ^ hash) & ~rps_cpu_mask)
+			goto try_rps;
+
+		next_cpu = ident & rps_cpu_mask;
+
+		/* OK, now we know there is a match,
+		 * we can look at the local (per receive queue) flow table
+		 */
 		rflow = &flow_table->flows[hash & flow_table->mask];
 		tcpu = rflow->cpu;
 
-		next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
-
 		/*
 		 * If the desired CPU (where last recvmsg was done) is
 		 * different from current CPU (one in the rx-queue flow
@@ -3162,6 +3166,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		}
 	}
 
+try_rps:
+
 	if (map) {
 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
 		if (cpu_online(tcpu)) {

diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index fde21d19e61b..7a31be5e361f 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -65,7 +65,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
				mutex_unlock(&sock_flow_mutex);
				return -ENOMEM;
			}
-
+			rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
			sock_table->mask = size - 1;
		} else
			sock_table = orig_sock_table;

diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index a44773c8346c..d2e49baaff63 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -395,8 +395,6 @@ int inet_release(struct socket *sock)
 	if (sk) {
 		long timeout;
 
-		sock_rps_reset_flow(sk);
-
 		/* Applications forget to leave groups before exiting */
 		ip_mc_drop_socket(sk);

-- cgit v1.2.3
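The rewritten get_rps_cpu() consults the global socket flow table first and, on an upper-bit mismatch (a collision or an unrecorded flow), falls back to the plain RPS map via the new try_rps label. A self-contained sketch of just that decision, with illustrative names and sizes (steer(), RPS_CPU_MASK, and TABLE_MASK are ours, not kernel symbols):

#include <stdio.h>
#include <stdint.h>

#define RPS_CPU_MASK	0x3f	/* assume 64 possible cpus */
#define TABLE_MASK	1023	/* assume a 1024-entry global table */

static uint32_t ents[TABLE_MASK + 1];	/* models rps_sock_flow_table.ents[] */

/* returns 1 and the recorded cpu on a validated match, 0 otherwise */
static int steer(uint32_t hash, int *cpu)
{
	uint32_t ident = ents[hash & TABLE_MASK];

	if ((ident ^ hash) & ~RPS_CPU_MASK)
		return 0;		/* no match: caller tries the RPS map */
	*cpu = ident & RPS_CPU_MASK;	/* match: steer to the recorded cpu */
	return 1;
}

int main(void)
{
	uint32_t hash = 0xcafef00d;
	int cpu;

	/* recvmsg ran on cpu 7: record upper hash bits | cpu */
	ents[hash & TABLE_MASK] = (hash & ~RPS_CPU_MASK) | 7;

	if (steer(hash, &cpu))
		printf("RFS steering to cpu %d\n", cpu);
	if (!steer(hash + 1, &cpu))	/* an unrecorded flow */
		printf("no match, fall back to the RPS map\n");
	return 0;
}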
From 93c1af6ca94c1e763efba76a127b5c135e3d23a6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Sun, 8 Feb 2015 20:39:13 -0800
Subject: net: rfs: adjust table size checking

Make sure the root user cannot request an oversized table.

Also make sure the mask field in struct rps_sock_flow_table does not
share a cache line with the potentially often dirtied flow table.

Signed-off-by: Eric Dumazet
Fixes: 567e4b79731c ("net: rfs: add hash collision detection")
Signed-off-by: David S. Miller
---
 include/linux/netdevice.h  | 3 ++-
 net/core/sysctl_net_core.c | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ab3b7cef4638..d115256ed5a2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -653,7 +653,8 @@ struct rps_dev_flow_table {
  */
 struct rps_sock_flow_table {
 	u32	mask;
-	u32	ents[0];
+
+	u32	ents[0] ____cacheline_aligned_in_smp;
 };
 
 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7a31be5e361f..eaa51ddf2368 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -52,7 +52,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
 
	if (write) {
		if (size) {
-			if (size > 1<<30) {
+			if (size > 1<<29) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;

-- cgit v1.2.3
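Both halves of this fix can be demonstrated in a userspace sketch (ours): GCC's aligned attribute stands in for ____cacheline_aligned_in_smp, a 64-byte cache line is assumed, and the overflow shown is the 32-bit byte-count wrap that the tightened 1<<29 limit guards against where sizes are computed in 32 bits:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define CACHELINE 64	/* assumed SMP cache-line size */

struct rps_sock_flow_table {
	uint32_t mask;
	/* zero-length array is a GNU extension, mirroring the kernel's
	 * u32 ents[0] ____cacheline_aligned_in_smp */
	uint32_t ents[0] __attribute__((aligned(CACHELINE)));
};

int main(void)
{
	/* mask no longer shares a cache line with the flow entries */
	printf("ents[] starts at offset %zu\n",
	       offsetof(struct rps_sock_flow_table, ents));

	/* 1 << 30 u32 entries needs 2^32 bytes: a 32-bit byte count
	 * wraps to 0, while 1 << 29 entries still fits */
	uint32_t wrapped = (uint32_t)(1u << 30) * (uint32_t)sizeof(uint32_t);
	printf("byte count for 1<<30 entries, in 32 bits: %u\n", wrapped);
	printf("byte count for 1<<29 entries: %llu\n",
	       (unsigned long long)(1u << 29) * sizeof(uint32_t));
	return 0;
}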
From 35f05dabf95ac3ebc4c15bafd6833f7a3046e66f Mon Sep 17 00:00:00 2001
From: Yishai Hadas
Date: Sun, 8 Feb 2015 11:49:34 +0200
Subject: IB/mlx4: Reset flow support for IB kernel ULPs

The driver exposes interfaces that directly relate to HW state. Upon fatal
error, consumers of these interfaces (ULPs) that rely on completion of all
their posted work-requests could hang, thereby introducing dependencies in
shutdown order. To prevent this from happening, we manage the relevant
resources (CQs, QPs) that are used by the device.

Upon a fatal error, we now generate simulated completions for outstanding
WQEs that were not completed at the time the HW was reset. This includes
invoking the completion event handler for all involved CQs so that the
ULPs will poll those CQs. When polled, we return simulated CQEs with an
IB_WC_WR_FLUSH_ERR return code, enabling ULPs to clean up their resources
instead of waiting forever for completions upon receiving remove_one.

The above change requires an extra check in the data path to make sure
that when the device is in an error state, the simulated CQEs are
returned and no further WQEs are posted.

Signed-off-by: Yishai Hadas
Signed-off-by: Or Gerlitz
Signed-off-by: David S. Miller
---
 drivers/infiniband/hw/mlx4/cq.c      | 57 ++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/mlx4/main.c    | 64 ++++++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  9 +++++
 drivers/infiniband/hw/mlx4/qp.c      | 59 +++++++++++++++++++++++++++++----
 drivers/infiniband/hw/mlx4/srq.c     |  8 +++++
 include/linux/mlx4/device.h          |  2 ++
 6 files changed, 193 insertions(+), 6 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035..543ecdd8667b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	spin_lock_init(&cq->lock);
 	cq->resize_buf = NULL;
 	cq->resize_umem = NULL;
+	INIT_LIST_HEAD(&cq->send_qp_list);
+	INIT_LIST_HEAD(&cq->recv_qp_list);
 
 	if (context) {
 		struct mlx4_ib_create_cq ucmd;
@@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
 	return 0;
 }
 
+static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
+			       struct ib_wc *wc, int *npolled, int is_send)
+{
+	struct mlx4_ib_wq *wq;
+	unsigned cur;
+	int i;
+
+	wq = is_send ? &qp->sq : &qp->rq;
+	cur = wq->head - wq->tail;
+
+	if (cur == 0)
+		return;
+
+	for (i = 0; i < cur && *npolled < num_entries; i++) {
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
+		wq->tail++;
+		(*npolled)++;
+		wc->qp = &qp->ibqp;
+		wc++;
+	}
+}
+
+static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+				 struct ib_wc *wc, int *npolled)
+{
+	struct mlx4_ib_qp *qp;
+
+	*npolled = 0;
+	/* Find uncompleted WQEs belonging to that cq and return
+	 * simulated FLUSH_ERR completions
+	 */
+	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+		mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
+		if (*npolled >= num_entries)
+			goto out;
+	}
+
+	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
+		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
+		if (*npolled >= num_entries)
+			goto out;
+	}
+
+out:
+	return;
+}
+
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
@@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	unsigned long flags;
 	int npolled;
 	int err = 0;
+	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
 	spin_lock_irqsave(&cq->lock, flags);
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+		goto out;
+	}
 
 	for (npolled = 0; npolled < num_entries; ++npolled) {
 		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
@@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
 	mlx4_cq_set_ci(&cq->mcq);
 
+out:
 	spin_unlock_irqrestore(&cq->lock, flags);
 
 	if (err == 0 || err == -EAGAIN)
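The head/tail walk in mlx4_ib_qp_sw_comp() above is the generic "drain a software ring into flush completions" pattern. A stripped-down sketch of that walk, with toy types and names of our own rather than driver code:

#include <stdio.h>

#define WQE_CNT 8	/* assume a power-of-two ring size */

/* toy work queue: head advances on post, tail on completion */
struct wq {
	unsigned head, tail;
	unsigned long wrid[WQE_CNT];
};

/* emit a "flushed" completion for every posted-but-uncompleted WQE,
 * up to max entries, mirroring the mlx4_ib_qp_sw_comp() loop */
static void drain_flush(struct wq *wq, unsigned long *wr_id, int max, int *n)
{
	unsigned cur = wq->head - wq->tail;	/* wraparound-safe count */
	unsigned i;

	for (i = 0; i < cur && *n < max; i++) {
		wr_id[*n] = wq->wrid[wq->tail & (WQE_CNT - 1)];
		wq->tail++;
		(*n)++;
	}
}

int main(void)
{
	struct wq wq = { .head = 3, .tail = 0, .wrid = { 100, 101, 102 } };
	unsigned long flushed[WQE_CNT];
	int n = 0;

	drain_flush(&wq, flushed, WQE_CNT, &n);
	for (int i = 0; i < n; i++)
		printf("flush completion for wr_id %lu\n", flushed[i]);
	return 0;
}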
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3140da518a07..eb8e215f1613 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2308,6 +2308,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
+	INIT_LIST_HEAD(&ibdev->qp_list);
+	spin_lock_init(&ibdev->reset_flow_resource_lock);
 
 	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
@@ -2622,6 +2624,67 @@ out:
 	return;
 }
 
+static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
+{
+	struct mlx4_ib_qp *mqp;
+	unsigned long flags_qp;
+	unsigned long flags_cq;
+	struct mlx4_ib_cq *send_mcq, *recv_mcq;
+	struct list_head cq_notify_list;
+	struct mlx4_cq *mcq;
+	unsigned long flags;
+
+	pr_warn("mlx4_ib_handle_catas_error was started\n");
+	INIT_LIST_HEAD(&cq_notify_list);
+
+	/* Go over the qp list residing on that ibdev, sync with create/destroy qp */
+	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+
+	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+		if (mqp->sq.tail != mqp->sq.head) {
+			send_mcq = to_mcq(mqp->ibqp.send_cq);
+			spin_lock_irqsave(&send_mcq->lock, flags_cq);
+			if (send_mcq->mcq.comp &&
+			    mqp->ibqp.send_cq->comp_handler) {
+				if (!send_mcq->mcq.reset_notify_added) {
+					send_mcq->mcq.reset_notify_added = 1;
+					list_add_tail(&send_mcq->mcq.reset_notify,
+						      &cq_notify_list);
+				}
+			}
+			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+		}
+		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+		/* Now, handle the QP's receive queue */
+		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+		/* no handling is needed for SRQ */
+		if (!mqp->ibqp.srq) {
+			if (mqp->rq.tail != mqp->rq.head) {
+				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+				if (recv_mcq->mcq.comp &&
+				    mqp->ibqp.recv_cq->comp_handler) {
+					if (!recv_mcq->mcq.reset_notify_added) {
+						recv_mcq->mcq.reset_notify_added = 1;
+						list_add_tail(&recv_mcq->mcq.reset_notify,
+							      &cq_notify_list);
+					}
+				}
+				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
+			}
+		}
+		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+	}
+
+	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
+		mcq->comp(mcq);
+	}
+	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+	pr_warn("mlx4_ib_handle_catas_error ended\n");
+}
+
 static void handle_bonded_port_state_event(struct work_struct *work)
 {
 	struct ib_event_work *ew =
@@ -2701,6 +2764,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
 		ibdev->ib_active = false;
 		ibev.event = IB_EVENT_DEVICE_FATAL;
+		mlx4_ib_handle_catas_error(ibdev);
 		break;
 
 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:

diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 721540c9163d..f829fd935b79 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,9 @@ struct mlx4_ib_cq {
 	struct mutex resize_mutex;
 	struct ib_umem *umem;
 	struct ib_umem *resize_umem;
+	/* List of qps that it serves */
+	struct list_head send_qp_list;
+	struct list_head recv_qp_list;
 };
 
 struct mlx4_ib_mr {
@@ -300,6 +303,9 @@ struct mlx4_ib_qp {
 	struct mlx4_roce_smac_vlan_info pri;
 	struct mlx4_roce_smac_vlan_info alt;
 	u64 reg_id;
+	struct list_head qps_list;
+	struct list_head cq_recv_list;
+	struct list_head cq_send_list;
 };
 
 struct mlx4_ib_srq {
@@ -535,6 +541,9 @@ struct mlx4_ib_dev {
 	/* lock when destroying qp1_proxy and getting netdev events */
 	struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
 	u8 bond_next_port;
+	/* protect resources needed as part of reset flow */
+	spinlock_t reset_flow_resource_lock;
+	struct list_head qp_list;
 };
 
 struct ib_event_work {
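One CQ can serve the send and receive sides of many QPs, so the QP walk in mlx4_ib_handle_catas_error() may reach the same CQ repeatedly; the reset_notify_added flag guarantees each CQ is queued for notification only once. A small sketch of that guard, with toy types of our own (an array stands in for the kernel's linked list):

#include <stdio.h>

struct cq {
	int id;
	int reset_notify_added;	/* mirrors mcq.reset_notify_added */
};

/* collect each CQ at most once, returning the new list length */
static int collect(struct cq *cq, struct cq **list, int n)
{
	if (!cq->reset_notify_added) {
		cq->reset_notify_added = 1;
		list[n++] = cq;
	}
	return n;
}

int main(void)
{
	struct cq a = { .id = 1 }, b = { .id = 2 };
	/* the QP walk can visit the same CQ several times */
	struct cq *visits[] = { &a, &b, &a, &a, &b };
	struct cq *notify[8];
	int n = 0;

	for (unsigned i = 0; i < sizeof(visits) / sizeof(visits[0]); i++)
		n = collect(visits[i], notify, n);

	for (int i = 0; i < n; i++)
		printf("invoke comp handler for cq %d\n", notify[i]->id);
	return 0;
}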
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 792f9dc86ada..dfc6ca128a7e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -46,6 +46,11 @@
 #include "mlx4_ib.h"
 #include "user.h"
 
+static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
+			     struct mlx4_ib_cq *recv_cq);
+static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
+			       struct mlx4_ib_cq *recv_cq);
+
 enum {
 	MLX4_IB_ACK_REQ_FREQ	= 8,
 };
@@ -618,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	struct mlx4_ib_sqp *sqp;
 	struct mlx4_ib_qp *qp;
 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
+	struct mlx4_ib_cq *mcq;
+	unsigned long flags;
 
 	/* When tunneling special qps, we use a plain UD qp */
 	if (sqpn) {
@@ -828,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->mqp.event = mlx4_ib_qp_event;
 	if (!*caller_qp)
 		*caller_qp = qp;
+
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+			 to_mcq(init_attr->recv_cq));
+	/* Maintain device to QPs access, needed for further handling
+	 * via reset flow
+	 */
+	list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Maintain CQ to QPs access, needed for further handling
+	 * via reset flow
+	 */
+	mcq = to_mcq(init_attr->send_cq);
+	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+	mcq = to_mcq(init_attr->recv_cq);
+	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+			   to_mcq(init_attr->recv_cq));
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
 	return 0;
 
 err_qpn:
@@ -886,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
 	if (send_cq == recv_cq) {
-		spin_lock_irq(&send_cq->lock);
+		spin_lock(&send_cq->lock);
 		__acquire(&recv_cq->lock);
 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
-		spin_lock_irq(&send_cq->lock);
+		spin_lock(&send_cq->lock);
 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
 	} else {
-		spin_lock_irq(&recv_cq->lock);
+		spin_lock(&recv_cq->lock);
 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
 	}
 }
@@ -902,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
 {
 	if (send_cq == recv_cq) {
 		__release(&recv_cq->lock);
-		spin_unlock_irq(&send_cq->lock);
+		spin_unlock(&send_cq->lock);
 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
 		spin_unlock(&recv_cq->lock);
-		spin_unlock_irq(&send_cq->lock);
+		spin_unlock(&send_cq->lock);
 	} else {
 		spin_unlock(&send_cq->lock);
-		spin_unlock_irq(&recv_cq->lock);
+		spin_unlock(&recv_cq->lock);
 	}
 }
@@ -953,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
 {
 	struct mlx4_ib_cq *send_cq, *recv_cq;
+	unsigned long flags;
 
 	if (qp->state != IB_QPS_RESET) {
 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
@@ -984,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 
 	get_cqs(qp, &send_cq, &recv_cq);
 
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
 	mlx4_ib_lock_cqs(send_cq, recv_cq);
 
+	/* del from lists under both locks above to protect reset flow paths */
+	list_del(&qp->qps_list);
+	list_del(&qp->cq_send_list);
+	list_del(&qp->cq_recv_list);
 	if (!is_user) {
 		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@@ -996,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_qp_remove(dev->dev, &qp->mqp);
 
 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
 	mlx4_qp_free(dev->dev, &qp->mqp);
 
@@ -2618,8 +2650,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	__be32 uninitialized_var(lso_hdr_sz);
 	__be32 blh;
 	int i;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		err = -EIO;
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
 
 	ind = qp->sq_next_wqe;
 
@@ -2917,10 +2956,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	int ind;
 	int max_gs;
 	int i;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 
 	max_gs = qp->rq.max_gs;
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		err = -EIO;
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
+
 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
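mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() take the two CQ locks in ascending cqn order so that concurrent callers cannot deadlock against each other, and they now leave interrupt disabling to the caller's reset_flow_resource_lock (hence spin_lock_irq() becoming spin_lock()). The ordering idea in a standalone pthread sketch of our own:

#include <pthread.h>
#include <stdio.h>

struct cq {
	int cqn;
	pthread_mutex_t lock;
};

/* acquire both CQ locks in a globally consistent (cqn) order to
 * avoid an ABBA deadlock, mirroring mlx4_ib_lock_cqs() */
static void lock_cqs(struct cq *send_cq, struct cq *recv_cq)
{
	if (send_cq == recv_cq) {
		pthread_mutex_lock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		pthread_mutex_lock(&send_cq->lock);
		pthread_mutex_lock(&recv_cq->lock);
	} else {
		pthread_mutex_lock(&recv_cq->lock);
		pthread_mutex_lock(&send_cq->lock);
	}
}

static void unlock_cqs(struct cq *send_cq, struct cq *recv_cq)
{
	if (send_cq == recv_cq) {
		pthread_mutex_unlock(&send_cq->lock);
		return;
	}
	pthread_mutex_unlock(&recv_cq->lock);
	pthread_mutex_unlock(&send_cq->lock);
}

int main(void)
{
	struct cq a = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct cq b = { 2, PTHREAD_MUTEX_INITIALIZER };

	lock_cqs(&b, &a);	/* same lock order regardless of argument order */
	unlock_cqs(&b, &a);
	puts("locked and unlocked in cqn order");
	return 0;
}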
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 62d9285300af..dce5dfe3a70e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	int err = 0;
 	int nreq;
 	int i;
+	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
 
 	spin_lock_irqsave(&srq->lock, flags);
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		err = -EIO;
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
 	}
 
+out:
 	spin_unlock_irqrestore(&srq->lock, flags);

diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index c116cb02475c..e4ebff7e9d02 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -689,6 +689,8 @@ struct mlx4_cq {
		void (*comp)(struct mlx4_cq *);
		void *priv;
	} tasklet_ctx;
+	int	reset_notify_added;
+	struct list_head	reset_notify;
 };
 
 struct mlx4_qp {

-- cgit v1.2.3
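All three post verbs (mlx4_ib_post_send, mlx4_ib_post_recv, mlx4_ib_post_srq_recv) now share the same prologue: under the queue lock, a device in internal-error state posts nothing and returns -EIO, with *bad_wr pointing at the first unposted request so the ULP knows the whole chain failed. A generic sketch of that pattern with toy types of our own:

#include <errno.h>
#include <stdio.h>

#define DEV_INTERNAL_ERROR 0x1	/* stand-in for MLX4_DEVICE_STATE_INTERNAL_ERROR */

struct wr {
	int id;
	struct wr *next;
};

/* common prologue: if the device hit a fatal error, post nothing and
 * hand the whole chain back through *bad_wr with -EIO */
static int post(unsigned dev_state, struct wr *wr, struct wr **bad_wr)
{
	if (dev_state & DEV_INTERNAL_ERROR) {
		*bad_wr = wr;
		return -EIO;
	}
	for (; wr; wr = wr->next)
		;	/* ...build WQEs, ring the doorbell, etc. */
	return 0;
}

int main(void)
{
	struct wr w2 = { 2, NULL }, w1 = { 1, &w2 };
	struct wr *bad = NULL;

	if (post(DEV_INTERNAL_ERROR, &w1, &bad))
		printf("post failed, first unposted wr is %d\n", bad->id);
	return 0;
}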